drivers/infiniband/hw/qib/qib_mad.c
/*
 * Copyright (c) 2012 Intel Corporation. All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_smi.h>

#include "qib.h"
#include "qib_mad.h"

static int reply(struct ib_smp *smp)
{
	/*
	 * The verbs framework will handle the directed/LID route
	 * packet changes.
	 */
	smp->method = IB_MGMT_METHOD_GET_RESP;
	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		smp->status |= IB_SMP_DIRECTION;
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

static int reply_failure(struct ib_smp *smp)
{
	/*
	 * The verbs framework will handle the directed/LID route
	 * packet changes.
	 */
	smp->method = IB_MGMT_METHOD_GET_RESP;
	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		smp->status |= IB_SMP_DIRECTION;
	return IB_MAD_RESULT_FAILURE | IB_MAD_RESULT_REPLY;
}

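/*
 * Build and post a Trap() SMP carrying @data to the subnet manager.
 * Nothing is sent unless the link is active (o14-3.2.1) and the o14-2
 * rate limit (trap_timeout) has expired.
 */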
static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
{
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_agent *agent;
	struct ib_smp *smp;
	int ret;
	unsigned long flags;
	unsigned long timeout;

	agent = ibp->send_agent;
	if (!agent)
		return;

	/* o14-3.2.1 */
	if (!(ppd_from_ibp(ibp)->lflags & QIBL_LINKACTIVE))
		return;

	/* o14-2 */
	if (ibp->trap_timeout && time_before(jiffies, ibp->trap_timeout))
		return;

	send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
				      IB_MGMT_MAD_DATA, GFP_ATOMIC,
				      IB_MGMT_BASE_VERSION);
	if (IS_ERR(send_buf))
		return;

	smp = send_buf->mad;
	smp->base_version = IB_MGMT_BASE_VERSION;
	smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	smp->class_version = 1;
	smp->method = IB_MGMT_METHOD_TRAP;
	ibp->tid++;
	smp->tid = cpu_to_be64(ibp->tid);
	smp->attr_id = IB_SMP_ATTR_NOTICE;
	/* o14-1: smp->mkey = 0; */
	memcpy(smp->data, data, len);

	spin_lock_irqsave(&ibp->lock, flags);
	if (!ibp->sm_ah) {
		if (ibp->sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
			struct ib_ah *ah;

			ah = qib_create_qp0_ah(ibp, ibp->sm_lid);
			if (IS_ERR(ah))
				ret = PTR_ERR(ah);
			else {
				send_buf->ah = ah;
				ibp->sm_ah = to_iah(ah);
				ret = 0;
			}
		} else
			ret = -EINVAL;
	} else {
		send_buf->ah = &ibp->sm_ah->ibah;
		ret = 0;
	}
	spin_unlock_irqrestore(&ibp->lock, flags);

	if (!ret)
		ret = ib_post_send_mad(send_buf, NULL);
	if (!ret) {
		/* 4.096 usec. */
		timeout = (4096 * (1UL << ibp->subnet_timeout)) / 1000;
		ibp->trap_timeout = jiffies + usecs_to_jiffies(timeout);
	} else {
		ib_free_send_mad(send_buf);
		ibp->trap_timeout = 0;
	}
}

/*
 * Send a bad [PQ]_Key trap (ch. 14.3.8).
 */
void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
		   u32 qp1, u32 qp2, __be16 lid1, __be16 lid2)
{
	struct ib_mad_notice_attr data;

	if (trap_num == IB_NOTICE_TRAP_BAD_PKEY)
		ibp->pkey_violations++;
	else
		ibp->qkey_violations++;
	ibp->n_pkt_drops++;

	/* Send violation trap */
	data.generic_type = IB_NOTICE_TYPE_SECURITY;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = trap_num;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof(data.details));
	data.details.ntc_257_258.lid1 = lid1;
	data.details.ntc_257_258.lid2 = lid2;
	data.details.ntc_257_258.key = cpu_to_be32(key);
	data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1);
	data.details.ntc_257_258.qp2 = cpu_to_be32(qp2);

	qib_send_trap(ibp, &data, sizeof(data));
}

/*
 * Send a bad M_Key trap (ch. 14.3.9).
 */
static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
{
	struct ib_mad_notice_attr data;

	/* Send violation trap */
	data.generic_type = IB_NOTICE_TYPE_SECURITY;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = IB_NOTICE_TRAP_BAD_MKEY;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof(data.details));
	data.details.ntc_256.lid = data.issuer_lid;
	data.details.ntc_256.method = smp->method;
	data.details.ntc_256.attr_id = smp->attr_id;
	data.details.ntc_256.attr_mod = smp->attr_mod;
	data.details.ntc_256.mkey = smp->mkey;
	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		u8 hop_cnt;

		data.details.ntc_256.dr_slid = smp->dr_slid;
		data.details.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE;
		hop_cnt = smp->hop_cnt;
		if (hop_cnt > ARRAY_SIZE(data.details.ntc_256.dr_rtn_path)) {
			data.details.ntc_256.dr_trunc_hop |=
				IB_NOTICE_TRAP_DR_TRUNC;
			hop_cnt = ARRAY_SIZE(data.details.ntc_256.dr_rtn_path);
		}
		data.details.ntc_256.dr_trunc_hop |= hop_cnt;
		memcpy(data.details.ntc_256.dr_rtn_path, smp->return_path,
		       hop_cnt);
	}

	qib_send_trap(ibp, &data, sizeof(data));
}

/*
 * Send a Port Capability Mask Changed trap (ch. 14.3.11).
 */
void qib_cap_mask_chg(struct qib_ibport *ibp)
{
	struct ib_mad_notice_attr data;

	data.generic_type = IB_NOTICE_TYPE_INFO;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof(data.details));
	data.details.ntc_144.lid = data.issuer_lid;
	data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags);

	qib_send_trap(ibp, &data, sizeof(data));
}

/*
 * Send a System Image GUID Changed trap (ch. 14.3.12).
 */
void qib_sys_guid_chg(struct qib_ibport *ibp)
{
	struct ib_mad_notice_attr data;

	data.generic_type = IB_NOTICE_TYPE_INFO;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof(data.details));
	data.details.ntc_145.lid = data.issuer_lid;
	data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid;

	qib_send_trap(ibp, &data, sizeof(data));
}

/*
 * Send a Node Description Changed trap (ch. 14.3.13).
 */
void qib_node_desc_chg(struct qib_ibport *ibp)
{
	struct ib_mad_notice_attr data;

	data.generic_type = IB_NOTICE_TYPE_INFO;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof(data.details));
	data.details.ntc_144.lid = data.issuer_lid;
	data.details.ntc_144.local_changes = 1;
	data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG;

	qib_send_trap(ibp, &data, sizeof(data));
}

static int subn_get_nodedescription(struct ib_smp *smp,
				    struct ib_device *ibdev)
{
	if (smp->attr_mod)
		smp->status |= IB_SMP_INVALID_FIELD;

	memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));

	return reply(smp);
}

static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct ib_node_info *nip = (struct ib_node_info *)&smp->data;
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	u32 vendor, majrev, minrev;
	unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

	/* GUID 0 is illegal */
	if (smp->attr_mod || pidx >= dd->num_pports ||
	    dd->pport[pidx].guid == 0)
		smp->status |= IB_SMP_INVALID_FIELD;
	else
		nip->port_guid = dd->pport[pidx].guid;

	nip->base_version = 1;
	nip->class_version = 1;
	nip->node_type = 1;     /* channel adapter */
	nip->num_ports = ibdev->phys_port_cnt;
	/* This is already in network order */
	nip->sys_guid = ib_qib_sys_image_guid;
	nip->node_guid = dd->pport->guid; /* Use first-port GUID as node */
	nip->partition_cap = cpu_to_be16(qib_get_npkeys(dd));
	nip->device_id = cpu_to_be16(dd->deviceid);
	majrev = dd->majrev;
	minrev = dd->minrev;
	nip->revision = cpu_to_be32((majrev << 16) | minrev);
	nip->local_port_num = port;
	vendor = dd->vendorid;
	nip->vendor_id[0] = QIB_SRC_OUI_1;
	nip->vendor_id[1] = QIB_SRC_OUI_2;
	nip->vendor_id[2] = QIB_SRC_OUI_3;

	return reply(smp);
}

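/* Subn Get(GUIDInfo): return the port GUID table; only block 0 is supported. */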
static int subn_get_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
	__be64 *p = (__be64 *) smp->data;
	unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

	/* 32 blocks of 8 64-bit GUIDs per block */

	memset(smp->data, 0, sizeof(smp->data));

	if (startgx == 0 && pidx < dd->num_pports) {
		struct qib_pportdata *ppd = dd->pport + pidx;
		struct qib_ibport *ibp = &ppd->ibport_data;
		__be64 g = ppd->guid;
		unsigned i;

		/* GUID 0 is illegal */
		if (g == 0)
			smp->status |= IB_SMP_INVALID_FIELD;
		else {
			/* The first is a copy of the read-only HW GUID. */
			p[0] = g;
			for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
				p[i] = ibp->guids[i - 1];
		}
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply(smp);
}

static void set_link_width_enabled(struct qib_pportdata *ppd, u32 w)
{
	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LWID_ENB, w);
}

static void set_link_speed_enabled(struct qib_pportdata *ppd, u32 s)
{
	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_SPD_ENB, s);
}

static int get_overrunthreshold(struct qib_pportdata *ppd)
{
	return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH);
}

/**
 * set_overrunthreshold - set the overrun threshold
 * @ppd: the physical port data
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
static int set_overrunthreshold(struct qib_pportdata *ppd, unsigned n)
{
	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH,
				     (u32)n);
	return 0;
}

static int get_phyerrthreshold(struct qib_pportdata *ppd)
{
	return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH);
}

/**
 * set_phyerrthreshold - set the physical error threshold
 * @ppd: the physical port data
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
static int set_phyerrthreshold(struct qib_pportdata *ppd, unsigned n)
{
	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH,
				     (u32)n);
	return 0;
}

/**
 * get_linkdowndefaultstate - get the default linkdown state
 * @ppd: the physical port data
 *
 * Returns zero if the default is POLL, 1 if the default is SLEEP.
 */
static int get_linkdowndefaultstate(struct qib_pportdata *ppd)
{
	return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT) ==
		IB_LINKINITCMD_SLEEP;
}

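/*
 * Validate the M_Key of an incoming SMP against the port's M_Key,
 * honoring the lease timeout and the mkeyprot protection level.
 * Returns 0 if the MAD may be processed, 1 if it must be dropped
 * (a bad-M_Key trap is generated when required).
 */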
static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags)
{
	int valid_mkey = 0;
	int ret = 0;

	/* Is the mkey in the process of expiring? */
	if (ibp->mkey_lease_timeout &&
	    time_after_eq(jiffies, ibp->mkey_lease_timeout)) {
		/* Clear timeout and mkey protection field. */
		ibp->mkey_lease_timeout = 0;
		ibp->mkeyprot = 0;
	}

	if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->mkey == 0 ||
	    ibp->mkey == smp->mkey)
		valid_mkey = 1;

	/* Unset lease timeout on any valid Get/Set/TrapRepress */
	if (valid_mkey && ibp->mkey_lease_timeout &&
	    (smp->method == IB_MGMT_METHOD_GET ||
	     smp->method == IB_MGMT_METHOD_SET ||
	     smp->method == IB_MGMT_METHOD_TRAP_REPRESS))
		ibp->mkey_lease_timeout = 0;

	if (!valid_mkey) {
		switch (smp->method) {
		case IB_MGMT_METHOD_GET:
			/* Bad mkey not a violation below level 2 */
			if (ibp->mkeyprot < 2)
				break;
			/* At level 2 and above, fall through to count it */
		case IB_MGMT_METHOD_SET:
		case IB_MGMT_METHOD_TRAP_REPRESS:
			if (ibp->mkey_violations != 0xFFFF)
				++ibp->mkey_violations;
			if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period)
				ibp->mkey_lease_timeout = jiffies +
					ibp->mkey_lease_period * HZ;
			/* Generate a trap notice. */
			qib_bad_mkey(ibp, smp);
			ret = 1;
		}
	}

	return ret;
}

static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_devdata *dd;
	struct qib_pportdata *ppd;
	struct qib_ibport *ibp;
	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
	u8 mtu;
	int ret;
	u32 state;
	u32 port_num = be32_to_cpu(smp->attr_mod);

	if (port_num == 0)
		port_num = port;
	else {
		if (port_num > ibdev->phys_port_cnt) {
			smp->status |= IB_SMP_INVALID_FIELD;
			ret = reply(smp);
			goto bail;
		}
		if (port_num != port) {
			ibp = to_iport(ibdev, port_num);
			ret = check_mkey(ibp, smp, 0);
			if (ret) {
				ret = IB_MAD_RESULT_FAILURE;
				goto bail;
			}
		}
	}

	dd = dd_from_ibdev(ibdev);
	/* IB numbers ports from 1, hdw from 0 */
	ppd = dd->pport + (port_num - 1);
	ibp = &ppd->ibport_data;

	/* Clear all fields.  Only set the non-zero fields. */
	memset(smp->data, 0, sizeof(smp->data));

	/* Only return the mkey if the protection field allows it. */
	if (!(smp->method == IB_MGMT_METHOD_GET &&
	      ibp->mkey != smp->mkey &&
	      ibp->mkeyprot == 1))
		pip->mkey = ibp->mkey;
	pip->gid_prefix = ibp->gid_prefix;
	pip->lid = cpu_to_be16(ppd->lid);
	pip->sm_lid = cpu_to_be16(ibp->sm_lid);
	pip->cap_mask = cpu_to_be32(ibp->port_cap_flags);
	/* pip->diag_code; */
	pip->mkey_lease_period = cpu_to_be16(ibp->mkey_lease_period);
	pip->local_port_num = port;
	pip->link_width_enabled = ppd->link_width_enabled;
	pip->link_width_supported = ppd->link_width_supported;
	pip->link_width_active = ppd->link_width_active;
	state = dd->f_iblink_state(ppd->lastibcstat);
	pip->linkspeed_portstate = ppd->link_speed_supported << 4 | state;

	pip->portphysstate_linkdown =
		(dd->f_ibphys_portstate(ppd->lastibcstat) << 4) |
		(get_linkdowndefaultstate(ppd) ? 1 : 2);
	pip->mkeyprot_resv_lmc = (ibp->mkeyprot << 6) | ppd->lmc;
	pip->linkspeedactive_enabled = (ppd->link_speed_active << 4) |
		ppd->link_speed_enabled;
	switch (ppd->ibmtu) {
	default: /* something is wrong; fall through */
	case 4096:
		mtu = IB_MTU_4096;
		break;
	case 2048:
		mtu = IB_MTU_2048;
		break;
	case 1024:
		mtu = IB_MTU_1024;
		break;
	case 512:
		mtu = IB_MTU_512;
		break;
	case 256:
		mtu = IB_MTU_256;
		break;
	}
	pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->sm_sl;
	pip->vlcap_inittype = ppd->vls_supported << 4;  /* InitType = 0 */
	pip->vl_high_limit = ibp->vl_high_limit;
	pip->vl_arb_high_cap =
		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_CAP);
	pip->vl_arb_low_cap =
		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_LOW_CAP);
	/* InitTypeReply = 0 */
	pip->inittypereply_mtucap = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
	/* HCAs ignore VLStallCount and HOQLife */
	/* pip->vlstallcnt_hoqlife; */
	pip->operationalvl_pei_peo_fpi_fpo =
		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OP_VLS) << 4;
	pip->mkey_violations = cpu_to_be16(ibp->mkey_violations);
	/* P_KeyViolations are counted by hardware. */
	pip->pkey_violations = cpu_to_be16(ibp->pkey_violations);
	pip->qkey_violations = cpu_to_be16(ibp->qkey_violations);
	/* Only the hardware GUID is supported for now */
	pip->guid_cap = QIB_GUIDS_PER_PORT;
	pip->clientrereg_resv_subnetto = ibp->subnet_timeout;
	/* 32.768 usec. response time (guessing) */
	pip->resv_resptimevalue = 3;
	pip->localphyerrors_overrunerrors =
		(get_phyerrthreshold(ppd) << 4) |
		get_overrunthreshold(ppd);
	/* pip->max_credit_hint; */
	if (ibp->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
		u32 v;

		v = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKLATENCY);
		pip->link_roundtrip_latency[0] = v >> 16;
		pip->link_roundtrip_latency[1] = v >> 8;
		pip->link_roundtrip_latency[2] = v;
	}

	ret = reply(smp);

bail:
	return ret;
}

/**
 * get_pkeys - return the PKEY table
 * @dd: the qlogic_ib device
 * @port: the IB port number
 * @pkeys: the pkey table is placed here
 */
static int get_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
{
	struct qib_pportdata *ppd = dd->pport + port - 1;
	/*
	 * always a kernel context, no locking needed.
	 * If we get here with ppd setup, no need to check
	 * that rcd is valid.
	 */
	struct qib_ctxtdata *rcd = dd->rcd[ppd->hw_pidx];

	memcpy(pkeys, rcd->pkeys, sizeof(rcd->pkeys));

	return 0;
}

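/* Subn Get(P_KeyTable): return P_Key block 0 in network byte order. */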
static int subn_get_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
			      u8 port)
{
	u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
	u16 *p = (u16 *) smp->data;
	__be16 *q = (__be16 *) smp->data;

	/* 64 blocks of 32 16-bit P_Key entries */

	memset(smp->data, 0, sizeof(smp->data));
	if (startpx == 0) {
		struct qib_devdata *dd = dd_from_ibdev(ibdev);
		unsigned i, n = qib_get_npkeys(dd);

		get_pkeys(dd, port, p);

		for (i = 0; i < n; i++)
			q[i] = cpu_to_be16(p[i]);
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply(smp);
}

static int subn_set_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
	__be64 *p = (__be64 *) smp->data;
	unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

	/* 32 blocks of 8 64-bit GUIDs per block */

	if (startgx == 0 && pidx < dd->num_pports) {
		struct qib_pportdata *ppd = dd->pport + pidx;
		struct qib_ibport *ibp = &ppd->ibport_data;
		unsigned i;

		/* The first entry is read-only. */
		for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
			ibp->guids[i - 1] = p[i];
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	/* The only GUID we support is the first read-only entry. */
	return subn_get_guidinfo(smp, ibdev, port);
}

/**
 * subn_set_portinfo - set port information
 * @smp: the incoming SM packet
 * @ibdev: the infiniband device
 * @port: the port on the device
 *
 * Set Portinfo (see ch. 14.2.5.6).
 */
static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
	struct ib_event event;
	struct qib_devdata *dd;
	struct qib_pportdata *ppd;
	struct qib_ibport *ibp;
	u8 clientrereg = (pip->clientrereg_resv_subnetto & 0x80);
	unsigned long flags;
	u16 lid, smlid;
	u8 lwe;
	u8 lse;
	u8 state;
	u8 vls;
	u8 msl;
	u16 lstate;
	int ret, ore, mtu;
	u32 port_num = be32_to_cpu(smp->attr_mod);

	if (port_num == 0)
		port_num = port;
	else {
		if (port_num > ibdev->phys_port_cnt)
			goto err;
		/* Port attributes can only be set on the receiving port */
		if (port_num != port)
			goto get_only;
	}

	dd = dd_from_ibdev(ibdev);
	/* IB numbers ports from 1, hdw from 0 */
	ppd = dd->pport + (port_num - 1);
	ibp = &ppd->ibport_data;
	event.device = ibdev;
	event.element.port_num = port;

	ibp->mkey = pip->mkey;
	ibp->gid_prefix = pip->gid_prefix;
	ibp->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);

	lid = be16_to_cpu(pip->lid);
	/* Must be a valid unicast LID address. */
	if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE)
		smp->status |= IB_SMP_INVALID_FIELD;
	else if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
		if (ppd->lid != lid)
			qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT);
		if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7))
			qib_set_uevent_bits(ppd, _QIB_EVENT_LMC_CHANGE_BIT);
		qib_set_lid(ppd, lid, pip->mkeyprot_resv_lmc & 7);
		event.event = IB_EVENT_LID_CHANGE;
		ib_dispatch_event(&event);
	}

	smlid = be16_to_cpu(pip->sm_lid);
	msl = pip->neighbormtu_mastersmsl & 0xF;
	/* Must be a valid unicast LID address. */
	if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE)
		smp->status |= IB_SMP_INVALID_FIELD;
	else if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
		spin_lock_irqsave(&ibp->lock, flags);
		if (ibp->sm_ah) {
			if (smlid != ibp->sm_lid)
				ibp->sm_ah->attr.dlid = smlid;
			if (msl != ibp->sm_sl)
				ibp->sm_ah->attr.sl = msl;
		}
		spin_unlock_irqrestore(&ibp->lock, flags);
		if (smlid != ibp->sm_lid)
			ibp->sm_lid = smlid;
		if (msl != ibp->sm_sl)
			ibp->sm_sl = msl;
		event.event = IB_EVENT_SM_CHANGE;
		ib_dispatch_event(&event);
	}

	/* Allow 1x or 4x to be set (see 14.2.6.6). */
	lwe = pip->link_width_enabled;
	if (lwe) {
		if (lwe == 0xFF)
			set_link_width_enabled(ppd, ppd->link_width_supported);
		else if (lwe >= 16 || (lwe & ~ppd->link_width_supported))
			smp->status |= IB_SMP_INVALID_FIELD;
		else if (lwe != ppd->link_width_enabled)
			set_link_width_enabled(ppd, lwe);
	}

	lse = pip->linkspeedactive_enabled & 0xF;
	if (lse) {
		/*
		 * The IB 1.2 spec. only allows link speed values
		 * 1, 3, 5, 7, 15.  1.2.1 extended to allow specific
		 * speeds.
		 */
		if (lse == 15)
			set_link_speed_enabled(ppd,
					       ppd->link_speed_supported);
		else if (lse >= 8 || (lse & ~ppd->link_speed_supported))
			smp->status |= IB_SMP_INVALID_FIELD;
		else if (lse != ppd->link_speed_enabled)
			set_link_speed_enabled(ppd, lse);
	}

	/* Set link down default state. */
	switch (pip->portphysstate_linkdown & 0xF) {
	case 0: /* NOP */
		break;
	case 1: /* SLEEP */
		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
					IB_LINKINITCMD_SLEEP);
		break;
	case 2: /* POLL */
		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
					IB_LINKINITCMD_POLL);
		break;
	default:
		smp->status |= IB_SMP_INVALID_FIELD;
	}

	ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
	ibp->vl_high_limit = pip->vl_high_limit;
	(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_LIMIT,
				ibp->vl_high_limit);

	mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF);
	if (mtu == -1)
		smp->status |= IB_SMP_INVALID_FIELD;
	else
		qib_set_mtu(ppd, mtu);

	/* Set operational VLs */
	vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF;
	if (vls) {
		if (vls > ppd->vls_supported)
			smp->status |= IB_SMP_INVALID_FIELD;
		else
			(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
	}

	if (pip->mkey_violations == 0)
		ibp->mkey_violations = 0;

	if (pip->pkey_violations == 0)
		ibp->pkey_violations = 0;

	if (pip->qkey_violations == 0)
		ibp->qkey_violations = 0;

	ore = pip->localphyerrors_overrunerrors;
	if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF))
		smp->status |= IB_SMP_INVALID_FIELD;

	if (set_overrunthreshold(ppd, (ore & 0xF)))
		smp->status |= IB_SMP_INVALID_FIELD;

	ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;

	/*
	 * Do the port state change now that the other link parameters
	 * have been set.
	 * Changing the port physical state only makes sense if the link
	 * is down or is being set to down.
	 */
	state = pip->linkspeed_portstate & 0xF;
	lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
	if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
		smp->status |= IB_SMP_INVALID_FIELD;

	/*
	 * Only state changes of DOWN, ARM, and ACTIVE are valid
	 * and must be in the correct state to take effect (see 7.2.6).
	 */
	switch (state) {
	case IB_PORT_NOP:
		if (lstate == 0)
			break;
		/* FALLTHROUGH */
	case IB_PORT_DOWN:
		if (lstate == 0)
			lstate = QIB_IB_LINKDOWN_ONLY;
		else if (lstate == 1)
			lstate = QIB_IB_LINKDOWN_SLEEP;
		else if (lstate == 2)
			lstate = QIB_IB_LINKDOWN;
		else if (lstate == 3)
			lstate = QIB_IB_LINKDOWN_DISABLE;
		else {
			smp->status |= IB_SMP_INVALID_FIELD;
			break;
		}
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_LINKV;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		qib_set_linkstate(ppd, lstate);
		/*
		 * Don't send a reply if the response would be sent
		 * through the disabled port.
		 */
		if (lstate == QIB_IB_LINKDOWN_DISABLE && smp->hop_cnt) {
			ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
			goto done;
		}
		qib_wait_linkstate(ppd, QIBL_LINKV, 10);
		break;
	case IB_PORT_ARMED:
		qib_set_linkstate(ppd, QIB_IB_LINKARM);
		break;
	case IB_PORT_ACTIVE:
		qib_set_linkstate(ppd, QIB_IB_LINKACTIVE);
		break;
	default:
		smp->status |= IB_SMP_INVALID_FIELD;
	}

	if (clientrereg) {
		event.event = IB_EVENT_CLIENT_REREGISTER;
		ib_dispatch_event(&event);
	}

	ret = subn_get_portinfo(smp, ibdev, port);

	/* restore re-reg bit per o14-12.2.1 */
	pip->clientrereg_resv_subnetto |= clientrereg;

	goto get_only;

err:
	smp->status |= IB_SMP_INVALID_FIELD;
get_only:
	ret = subn_get_portinfo(smp, ibdev, port);
done:
	return ret;
}

/**
 * rm_pkey - decrement the reference count for the given PKEY
 * @ppd: the physical port data
 * @key: the PKEY
 *
 * Return true if this was the last reference and the hardware table entry
 * needs to be changed.
 */
static int rm_pkey(struct qib_pportdata *ppd, u16 key)
{
	int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (ppd->pkeys[i] != key)
			continue;
		if (atomic_dec_and_test(&ppd->pkeyrefs[i])) {
			ppd->pkeys[i] = 0;
			ret = 1;
			goto bail;
		}
		break;
	}

	ret = 0;

bail:
	return ret;
}

/**
 * add_pkey - add the given PKEY to the hardware table
 * @ppd: the physical port data
 * @key: the PKEY
 *
 * Return an error code if unable to add the entry, zero if no change,
 * or 1 if the hardware PKEY register needs to be updated.
 */
static int add_pkey(struct qib_pportdata *ppd, u16 key)
{
	int i;
	u16 lkey = key & 0x7FFF;
	int any = 0;
	int ret;

	if (lkey == 0x7FFF) {
		ret = 0;
		goto bail;
	}

	/* Look for an empty slot or a matching PKEY. */
	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (!ppd->pkeys[i]) {
			any++;
			continue;
		}
		/* If it matches exactly, try to increment the ref count */
		if (ppd->pkeys[i] == key) {
			if (atomic_inc_return(&ppd->pkeyrefs[i]) > 1) {
				ret = 0;
				goto bail;
			}
			/* Lost the race. Look for an empty slot below. */
			atomic_dec(&ppd->pkeyrefs[i]);
			any++;
		}
		/*
		 * It makes no sense to have both the limited and unlimited
		 * PKEY set at the same time since the unlimited one will
		 * disable the limited one.
		 */
		if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
			ret = -EEXIST;
			goto bail;
		}
	}
	if (!any) {
		ret = -EBUSY;
		goto bail;
	}
	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (!ppd->pkeys[i] &&
		    atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
			/* for qibstats, etc. */
			ppd->pkeys[i] = key;
			ret = 1;
			goto bail;
		}
	}
	ret = -EBUSY;

bail:
	return ret;
}

/**
 * set_pkeys - set the PKEY table for ctxt 0
 * @dd: the qlogic_ib device
 * @port: the IB port number
 * @pkeys: the PKEY table
 */
static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
{
	struct qib_pportdata *ppd;
	struct qib_ctxtdata *rcd;
	int i;
	int changed = 0;

	/*
	 * IB port one/two always maps to context zero/one,
	 * always a kernel context, no locking needed
	 * If we get here with ppd setup, no need to check
	 * that rcd is valid.
	 */
	ppd = dd->pport + (port - 1);
	rcd = dd->rcd[ppd->hw_pidx];

	for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
		u16 key = pkeys[i];
		u16 okey = rcd->pkeys[i];

		if (key == okey)
			continue;
		/*
		 * The value of this PKEY table entry is changing.
		 * Remove the old entry in the hardware's array of PKEYs.
		 */
		if (okey & 0x7FFF)
			changed |= rm_pkey(ppd, okey);
		if (key & 0x7FFF) {
			int ret = add_pkey(ppd, key);

			if (ret < 0)
				key = 0;
			else
				changed |= ret;
		}
		rcd->pkeys[i] = key;
	}
	if (changed) {
		struct ib_event event;

		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);

		event.event = IB_EVENT_PKEY_CHANGE;
		event.device = &dd->verbs_dev.ibdev;
		event.element.port_num = port;
		ib_dispatch_event(&event);
	}
	return 0;
}

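/* Subn Set(P_KeyTable): update the ctxt 0 P_Key table, then reply with a Get. */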
static int subn_set_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
			      u8 port)
{
	u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
	__be16 *p = (__be16 *) smp->data;
	u16 *q = (u16 *) smp->data;
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	unsigned i, n = qib_get_npkeys(dd);

	for (i = 0; i < n; i++)
		q[i] = be16_to_cpu(p[i]);

	if (startpx != 0 || set_pkeys(dd, port, q) != 0)
		smp->status |= IB_SMP_INVALID_FIELD;

	return subn_get_pkeytable(smp, ibdev, port);
}

static int subn_get_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_ibport *ibp = to_iport(ibdev, port);
	u8 *p = (u8 *) smp->data;
	unsigned i;

	memset(smp->data, 0, sizeof(smp->data));

	if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP))
		smp->status |= IB_SMP_UNSUP_METHOD;
	else
		for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2)
			*p++ = (ibp->sl_to_vl[i] << 4) | ibp->sl_to_vl[i + 1];

	return reply(smp);
}

static int subn_set_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_ibport *ibp = to_iport(ibdev, port);
	u8 *p = (u8 *) smp->data;
	unsigned i;

	if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP)) {
		smp->status |= IB_SMP_UNSUP_METHOD;
		return reply(smp);
	}

	for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2, p++) {
		ibp->sl_to_vl[i] = *p >> 4;
		ibp->sl_to_vl[i + 1] = *p & 0xF;
	}
	qib_set_uevent_bits(ppd_from_ibp(to_iport(ibdev, port)),
			    _QIB_EVENT_SL2VL_CHANGE_BIT);

	return subn_get_sl_to_vl(smp, ibdev, port);
}

static int subn_get_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
			   u8 port)
{
	unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
	struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));

	memset(smp->data, 0, sizeof(smp->data));

	if (ppd->vls_supported == IB_VL_VL0)
		smp->status |= IB_SMP_UNSUP_METHOD;
	else if (which == IB_VLARB_LOWPRI_0_31)
		(void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
					       smp->data);
	else if (which == IB_VLARB_HIGHPRI_0_31)
		(void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
					       smp->data);
	else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply(smp);
}

static int subn_set_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
			   u8 port)
{
	unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
	struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));

	if (ppd->vls_supported == IB_VL_VL0)
		smp->status |= IB_SMP_UNSUP_METHOD;
	else if (which == IB_VLARB_LOWPRI_0_31)
		(void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
					       smp->data);
	else if (which == IB_VLARB_HIGHPRI_0_31)
		(void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
					       smp->data);
	else
		smp->status |= IB_SMP_INVALID_FIELD;

	return subn_get_vl_arb(smp, ibdev, port);
}

static int subn_trap_repress(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	/*
	 * For now, we only send the trap once so no need to process this.
	 * o13-6, o13-7,
	 * o14-3.a4 The SMA shall not send any message in response to a valid
	 * SubnTrapRepress() message.
	 */
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
}

static int pma_get_classportinfo(struct ib_pma_mad *pmp,
				 struct ib_device *ibdev)
{
	struct ib_class_port_info *p =
		(struct ib_class_port_info *)pmp->data;
	struct qib_devdata *dd = dd_from_ibdev(ibdev);

	memset(pmp->data, 0, sizeof(pmp->data));

	if (pmp->mad_hdr.attr_mod != 0)
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

	/* Note that AllPortSelect is not valid */
	p->base_version = 1;
	p->class_version = 1;
	p->capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
	/*
	 * Set the most significant bit of CM2 to indicate support for
	 * congestion statistics
	 */
	p->reserved[0] = dd->psxmitwait_supported << 7;
	/*
	 * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
	 */
	p->resp_time_value = 18;

	return reply((struct ib_smp *) pmp);
}

static int pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
				      struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplescontrol *p =
		(struct ib_pma_portsamplescontrol *)pmp->data;
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned long flags;
	u8 port_select = p->port_select;

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
		goto bail;
	}
	spin_lock_irqsave(&ibp->lock, flags);
	p->tick = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PMA_TICKS);
	p->sample_status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
	p->counter_width = 4;   /* 32 bit counters */
	p->counter_mask0_9 = COUNTER_MASK0_9;
	p->sample_start = cpu_to_be32(ibp->pma_sample_start);
	p->sample_interval = cpu_to_be32(ibp->pma_sample_interval);
	p->tag = cpu_to_be16(ibp->pma_tag);
	p->counter_select[0] = ibp->pma_counter_select[0];
	p->counter_select[1] = ibp->pma_counter_select[1];
	p->counter_select[2] = ibp->pma_counter_select[2];
	p->counter_select[3] = ibp->pma_counter_select[3];
	p->counter_select[4] = ibp->pma_counter_select[4];
	spin_unlock_irqrestore(&ibp->lock, flags);

bail:
	return reply((struct ib_smp *) pmp);
}

static int pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
				      struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplescontrol *p =
		(struct ib_pma_portsamplescontrol *)pmp->data;
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned long flags;
	u8 status, xmit_flags;
	int ret;

	if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
		ret = reply((struct ib_smp *) pmp);
		goto bail;
	}

	spin_lock_irqsave(&ibp->lock, flags);

	/* Port Sampling code owns the PS* HW counters */
	xmit_flags = ppd->cong_stats.flags;
	ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_SAMPLE;
	status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
	if (status == IB_PMA_SAMPLE_STATUS_DONE ||
	    (status == IB_PMA_SAMPLE_STATUS_RUNNING &&
	     xmit_flags == IB_PMA_CONG_HW_CONTROL_TIMER)) {
		ibp->pma_sample_start = be32_to_cpu(p->sample_start);
		ibp->pma_sample_interval = be32_to_cpu(p->sample_interval);
		ibp->pma_tag = be16_to_cpu(p->tag);
		ibp->pma_counter_select[0] = p->counter_select[0];
		ibp->pma_counter_select[1] = p->counter_select[1];
		ibp->pma_counter_select[2] = p->counter_select[2];
		ibp->pma_counter_select[3] = p->counter_select[3];
		ibp->pma_counter_select[4] = p->counter_select[4];
		dd->f_set_cntr_sample(ppd, ibp->pma_sample_interval,
				      ibp->pma_sample_start);
	}
	spin_unlock_irqrestore(&ibp->lock, flags);

	ret = pma_get_portsamplescontrol(pmp, ibdev, port);

bail:
	return ret;
}

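/* Read the current hardware PS* sample counter selected by @sel. */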
static u64 get_counter(struct qib_ibport *ibp, struct qib_pportdata *ppd,
		       __be16 sel)
{
	u64 ret;

	switch (sel) {
	case IB_PMA_PORT_XMIT_DATA:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITDATA);
		break;
	case IB_PMA_PORT_RCV_DATA:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVDATA);
		break;
	case IB_PMA_PORT_XMIT_PKTS:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITPKTS);
		break;
	case IB_PMA_PORT_RCV_PKTS:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVPKTS);
		break;
	case IB_PMA_PORT_XMIT_WAIT:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITWAIT);
		break;
	default:
		ret = 0;
	}

	return ret;
}

/* This function assumes that the xmit_wait lock is already held */
static u64 xmit_wait_get_value_delta(struct qib_pportdata *ppd)
{
	u32 delta;

	delta = get_counter(&ppd->ibport_data, ppd,
			    IB_PMA_PORT_XMIT_WAIT);
	return ppd->cong_stats.counter + delta;
}

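/* Snapshot all five PS* hardware sample counters into the cong_stats cache. */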
static void cache_hw_sample_counters(struct qib_pportdata *ppd)
{
	struct qib_ibport *ibp = &ppd->ibport_data;

	ppd->cong_stats.counter_cache.psxmitdata =
		get_counter(ibp, ppd, IB_PMA_PORT_XMIT_DATA);
	ppd->cong_stats.counter_cache.psrcvdata =
		get_counter(ibp, ppd, IB_PMA_PORT_RCV_DATA);
	ppd->cong_stats.counter_cache.psxmitpkts =
		get_counter(ibp, ppd, IB_PMA_PORT_XMIT_PKTS);
	ppd->cong_stats.counter_cache.psrcvpkts =
		get_counter(ibp, ppd, IB_PMA_PORT_RCV_PKTS);
	ppd->cong_stats.counter_cache.psxmitwait =
		get_counter(ibp, ppd, IB_PMA_PORT_XMIT_WAIT);
}

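/* Return the value cached for @sel by cache_hw_sample_counters(). */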
static u64 get_cache_hw_sample_counters(struct qib_pportdata *ppd,
					__be16 sel)
{
	u64 ret;

	switch (sel) {
	case IB_PMA_PORT_XMIT_DATA:
		ret = ppd->cong_stats.counter_cache.psxmitdata;
		break;
	case IB_PMA_PORT_RCV_DATA:
		ret = ppd->cong_stats.counter_cache.psrcvdata;
		break;
	case IB_PMA_PORT_XMIT_PKTS:
		ret = ppd->cong_stats.counter_cache.psxmitpkts;
		break;
	case IB_PMA_PORT_RCV_PKTS:
		ret = ppd->cong_stats.counter_cache.psrcvpkts;
		break;
	case IB_PMA_PORT_XMIT_WAIT:
		ret = ppd->cong_stats.counter_cache.psxmitwait;
		break;
	default:
		ret = 0;
	}

	return ret;
}

static int pma_get_portsamplesresult(struct ib_pma_mad *pmp,
				     struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplesresult *p =
		(struct ib_pma_portsamplesresult *)pmp->data;
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned long flags;
	u8 status;
	int i;

	memset(pmp->data, 0, sizeof(pmp->data));
	spin_lock_irqsave(&ibp->lock, flags);
	p->tag = cpu_to_be16(ibp->pma_tag);
	if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
		p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
	else {
		status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
		p->sample_status = cpu_to_be16(status);
		if (status == IB_PMA_SAMPLE_STATUS_DONE) {
			cache_hw_sample_counters(ppd);
			ppd->cong_stats.counter =
				xmit_wait_get_value_delta(ppd);
			dd->f_set_cntr_sample(ppd,
					      QIB_CONG_TIMER_PSINTERVAL, 0);
			ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
		}
	}
	for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++)
		p->counter[i] = cpu_to_be32(
			get_cache_hw_sample_counters(
				ppd, ibp->pma_counter_select[i]));
	spin_unlock_irqrestore(&ibp->lock, flags);

	return reply((struct ib_smp *) pmp);
}

static int pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
					 struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplesresult_ext *p =
		(struct ib_pma_portsamplesresult_ext *)pmp->data;
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned long flags;
	u8 status;
	int i;

	/* Port Sampling code owns the PS* HW counters */
	memset(pmp->data, 0, sizeof(pmp->data));
	spin_lock_irqsave(&ibp->lock, flags);
	p->tag = cpu_to_be16(ibp->pma_tag);
	if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
		p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
	else {
		status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
		p->sample_status = cpu_to_be16(status);
		/* 64 bits */
		p->extended_width = cpu_to_be32(0x80000000);
		if (status == IB_PMA_SAMPLE_STATUS_DONE) {
			cache_hw_sample_counters(ppd);
			ppd->cong_stats.counter =
				xmit_wait_get_value_delta(ppd);
			dd->f_set_cntr_sample(ppd,
					      QIB_CONG_TIMER_PSINTERVAL, 0);
			ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
		}
	}
	for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++)
		p->counter[i] = cpu_to_be64(
			get_cache_hw_sample_counters(
				ppd, ibp->pma_counter_select[i]));
	spin_unlock_irqrestore(&ibp->lock, flags);

	return reply((struct ib_smp *) pmp);
}

static int pma_get_portcounters(struct ib_pma_mad *pmp,
				struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_verbs_counters cntrs;
	u8 port_select = p->port_select;

	qib_get_counters(ppd, &cntrs);

	/* Adjust counters for any resets done. */
	cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
	cntrs.link_error_recovery_counter -=
		ibp->z_link_error_recovery_counter;
	cntrs.link_downed_counter -= ibp->z_link_downed_counter;
	cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
	cntrs.port_rcv_remphys_errors -= ibp->z_port_rcv_remphys_errors;
	cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
	cntrs.port_xmit_data -= ibp->z_port_xmit_data;
	cntrs.port_rcv_data -= ibp->z_port_rcv_data;
	cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
	cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;
	cntrs.local_link_integrity_errors -=
		ibp->z_local_link_integrity_errors;
	cntrs.excessive_buffer_overrun_errors -=
		ibp->z_excessive_buffer_overrun_errors;
	cntrs.vl15_dropped -= ibp->z_vl15_dropped;
	cntrs.vl15_dropped += ibp->n_vl15_dropped;

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	if (pmp->mad_hdr.attr_mod != 0 || port_select != port)
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

	if (cntrs.symbol_error_counter > 0xFFFFUL)
		p->symbol_error_counter = cpu_to_be16(0xFFFF);
	else
		p->symbol_error_counter =
			cpu_to_be16((u16)cntrs.symbol_error_counter);
	if (cntrs.link_error_recovery_counter > 0xFFUL)
		p->link_error_recovery_counter = 0xFF;
	else
		p->link_error_recovery_counter =
			(u8)cntrs.link_error_recovery_counter;
	if (cntrs.link_downed_counter > 0xFFUL)
		p->link_downed_counter = 0xFF;
	else
		p->link_downed_counter = (u8)cntrs.link_downed_counter;
	if (cntrs.port_rcv_errors > 0xFFFFUL)
		p->port_rcv_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_errors =
			cpu_to_be16((u16) cntrs.port_rcv_errors);
	if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
		p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_remphys_errors =
			cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
	if (cntrs.port_xmit_discards > 0xFFFFUL)
		p->port_xmit_discards = cpu_to_be16(0xFFFF);
	else
		p->port_xmit_discards =
			cpu_to_be16((u16)cntrs.port_xmit_discards);
	if (cntrs.local_link_integrity_errors > 0xFUL)
		cntrs.local_link_integrity_errors = 0xFUL;
	if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
		cntrs.excessive_buffer_overrun_errors = 0xFUL;
	p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
		cntrs.excessive_buffer_overrun_errors;
	if (cntrs.vl15_dropped > 0xFFFFUL)
		p->vl15_dropped = cpu_to_be16(0xFFFF);
	else
		p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
	if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
		p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
	if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
		p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
	if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
		p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_xmit_packets =
			cpu_to_be32((u32)cntrs.port_xmit_packets);
	if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
		p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_rcv_packets =
			cpu_to_be32((u32) cntrs.port_rcv_packets);

	return reply((struct ib_smp *) pmp);
}

static int pma_get_portcounters_cong(struct ib_pma_mad *pmp,
				     struct ib_device *ibdev, u8 port)
{
	/* Congestion PMA packets start at offset 24 not 64 */
	struct ib_pma_portcounters_cong *p =
		(struct ib_pma_portcounters_cong *)pmp->reserved;
	struct qib_verbs_counters cntrs;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = dd_from_ppd(ppd);
	u32 port_select = be32_to_cpu(pmp->mad_hdr.attr_mod) & 0xFF;
	u64 xmit_wait_counter;
	unsigned long flags;

	/*
	 * This check is performed only in the GET method because the
	 * SET method ends up calling this anyway.
	 */
	if (!dd->psxmitwait_supported)
		pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
	if (port_select != port)
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

	qib_get_counters(ppd, &cntrs);
	spin_lock_irqsave(&ppd->ibport_data.lock, flags);
	xmit_wait_counter = xmit_wait_get_value_delta(ppd);
	spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);

	/* Adjust counters for any resets done. */
	cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
	cntrs.link_error_recovery_counter -=
		ibp->z_link_error_recovery_counter;
	cntrs.link_downed_counter -= ibp->z_link_downed_counter;
	cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
	cntrs.port_rcv_remphys_errors -=
		ibp->z_port_rcv_remphys_errors;
	cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
	cntrs.local_link_integrity_errors -=
		ibp->z_local_link_integrity_errors;
	cntrs.excessive_buffer_overrun_errors -=
		ibp->z_excessive_buffer_overrun_errors;
	cntrs.vl15_dropped -= ibp->z_vl15_dropped;
	cntrs.vl15_dropped += ibp->n_vl15_dropped;
	cntrs.port_xmit_data -= ibp->z_port_xmit_data;
	cntrs.port_rcv_data -= ibp->z_port_rcv_data;
	cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
	cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;

	memset(pmp->reserved, 0, sizeof(pmp->reserved) +
	       sizeof(pmp->data));

	/*
	 * Set top 3 bits to indicate interval in picoseconds in
	 * remaining bits.
	 */
	p->port_check_rate =
		cpu_to_be16((QIB_XMIT_RATE_PICO << 13) |
			    (dd->psxmitwait_check_rate &
			     ~(QIB_XMIT_RATE_PICO << 13)));
	p->port_adr_events = cpu_to_be64(0);
	p->port_xmit_wait = cpu_to_be64(xmit_wait_counter);
	p->port_xmit_data = cpu_to_be64(cntrs.port_xmit_data);
	p->port_rcv_data = cpu_to_be64(cntrs.port_rcv_data);
	p->port_xmit_packets =
		cpu_to_be64(cntrs.port_xmit_packets);
	p->port_rcv_packets =
		cpu_to_be64(cntrs.port_rcv_packets);
	if (cntrs.symbol_error_counter > 0xFFFFUL)
		p->symbol_error_counter = cpu_to_be16(0xFFFF);
	else
		p->symbol_error_counter =
			cpu_to_be16(
				(u16)cntrs.symbol_error_counter);
	if (cntrs.link_error_recovery_counter > 0xFFUL)
		p->link_error_recovery_counter = 0xFF;
	else
		p->link_error_recovery_counter =
			(u8)cntrs.link_error_recovery_counter;
	if (cntrs.link_downed_counter > 0xFFUL)
		p->link_downed_counter = 0xFF;
	else
		p->link_downed_counter =
			(u8)cntrs.link_downed_counter;
	if (cntrs.port_rcv_errors > 0xFFFFUL)
		p->port_rcv_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_errors =
			cpu_to_be16((u16) cntrs.port_rcv_errors);
	if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
		p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_remphys_errors =
			cpu_to_be16(
				(u16)cntrs.port_rcv_remphys_errors);
	if (cntrs.port_xmit_discards > 0xFFFFUL)
		p->port_xmit_discards = cpu_to_be16(0xFFFF);
	else
		p->port_xmit_discards =
			cpu_to_be16((u16)cntrs.port_xmit_discards);
	if (cntrs.local_link_integrity_errors > 0xFUL)
		cntrs.local_link_integrity_errors = 0xFUL;
	if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
		cntrs.excessive_buffer_overrun_errors = 0xFUL;
	p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
		cntrs.excessive_buffer_overrun_errors;
	if (cntrs.vl15_dropped > 0xFFFFUL)
		p->vl15_dropped = cpu_to_be16(0xFFFF);
	else
		p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);

	return reply((struct ib_smp *)pmp);
}

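/* Sum the per-CPU PMA packet counters into *pmacounters. */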
static void qib_snapshot_pmacounters(
	struct qib_ibport *ibp,
	struct qib_pma_counters *pmacounters)
{
	struct qib_pma_counters *p;
	int cpu;

	memset(pmacounters, 0, sizeof(*pmacounters));
	for_each_possible_cpu(cpu) {
		p = per_cpu_ptr(ibp->pmastats, cpu);
		pmacounters->n_unicast_xmit += p->n_unicast_xmit;
		pmacounters->n_unicast_rcv += p->n_unicast_rcv;
		pmacounters->n_multicast_xmit += p->n_multicast_xmit;
		pmacounters->n_multicast_rcv += p->n_multicast_rcv;
	}
}

static int pma_get_portcounters_ext(struct ib_pma_mad *pmp,
				    struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters_ext *p =
		(struct ib_pma_portcounters_ext *)pmp->data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	u64 swords, rwords, spkts, rpkts, xwait;
	struct qib_pma_counters pma;
	u8 port_select = p->port_select;

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
		goto bail;
	}

	qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);

	/* Adjust counters for any resets done. */
	swords -= ibp->z_port_xmit_data;
	rwords -= ibp->z_port_rcv_data;
	spkts -= ibp->z_port_xmit_packets;
	rpkts -= ibp->z_port_rcv_packets;

	p->port_xmit_data = cpu_to_be64(swords);
	p->port_rcv_data = cpu_to_be64(rwords);
	p->port_xmit_packets = cpu_to_be64(spkts);
	p->port_rcv_packets = cpu_to_be64(rpkts);

	qib_snapshot_pmacounters(ibp, &pma);

	p->port_unicast_xmit_packets = cpu_to_be64(pma.n_unicast_xmit
		- ibp->z_unicast_xmit);
	p->port_unicast_rcv_packets = cpu_to_be64(pma.n_unicast_rcv
		- ibp->z_unicast_rcv);
	p->port_multicast_xmit_packets = cpu_to_be64(pma.n_multicast_xmit
		- ibp->z_multicast_xmit);
	p->port_multicast_rcv_packets = cpu_to_be64(pma.n_multicast_rcv
		- ibp->z_multicast_rcv);

bail:
	return reply((struct ib_smp *) pmp);
}

static int pma_set_portcounters(struct ib_pma_mad *pmp,
				struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_verbs_counters cntrs;

	/*
	 * Since the HW doesn't support clearing counters, we save the
	 * current count and subtract it from future responses.
	 */
	qib_get_counters(ppd, &cntrs);

	if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
		ibp->z_symbol_error_counter = cntrs.symbol_error_counter;

	if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
		ibp->z_link_error_recovery_counter =
			cntrs.link_error_recovery_counter;

	if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
		ibp->z_link_downed_counter = cntrs.link_downed_counter;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
		ibp->z_port_rcv_errors = cntrs.port_rcv_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
		ibp->z_port_rcv_remphys_errors =
			cntrs.port_rcv_remphys_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
		ibp->z_port_xmit_discards = cntrs.port_xmit_discards;

	if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS)
		ibp->z_local_link_integrity_errors =
			cntrs.local_link_integrity_errors;

	if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS)
		ibp->z_excessive_buffer_overrun_errors =
			cntrs.excessive_buffer_overrun_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
		ibp->n_vl15_dropped = 0;
		ibp->z_vl15_dropped = cntrs.vl15_dropped;
	}

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
		ibp->z_port_xmit_data = cntrs.port_xmit_data;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
		ibp->z_port_rcv_data = cntrs.port_rcv_data;

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
		ibp->z_port_xmit_packets = cntrs.port_xmit_packets;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
		ibp->z_port_rcv_packets = cntrs.port_rcv_packets;

	return pma_get_portcounters(pmp, ibdev, port);
}

1765 static int pma_set_portcounters_cong(struct ib_pma_mad *pmp,
1766 struct ib_device *ibdev, u8 port)
1767 {
1768 struct qib_ibport *ibp = to_iport(ibdev, port);
1769 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1770 struct qib_devdata *dd = dd_from_ppd(ppd);
1771 struct qib_verbs_counters cntrs;
1772 u32 counter_select = (be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24) & 0xFF;
1773 int ret = 0;
1774 unsigned long flags;
1775
1776 qib_get_counters(ppd, &cntrs);
1777 /* Get counter values before we save them */
1778 ret = pma_get_portcounters_cong(pmp, ibdev, port);
1779
1780 if (counter_select & IB_PMA_SEL_CONG_XMIT) {
1781 spin_lock_irqsave(&ppd->ibport_data.lock, flags);
1782 ppd->cong_stats.counter = 0;
1783 dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL,
1784 0x0);
1785 spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
1786 }
1787 if (counter_select & IB_PMA_SEL_CONG_PORT_DATA) {
1788 ibp->z_port_xmit_data = cntrs.port_xmit_data;
1789 ibp->z_port_rcv_data = cntrs.port_rcv_data;
1790 ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
1791 ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
1792 }
1793 if (counter_select & IB_PMA_SEL_CONG_ALL) {
1794 ibp->z_symbol_error_counter =
1795 cntrs.symbol_error_counter;
1796 ibp->z_link_error_recovery_counter =
1797 cntrs.link_error_recovery_counter;
1798 ibp->z_link_downed_counter =
1799 cntrs.link_downed_counter;
1800 ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
1801 ibp->z_port_rcv_remphys_errors =
1802 cntrs.port_rcv_remphys_errors;
1803 ibp->z_port_xmit_discards =
1804 cntrs.port_xmit_discards;
1805 ibp->z_local_link_integrity_errors =
1806 cntrs.local_link_integrity_errors;
1807 ibp->z_excessive_buffer_overrun_errors =
1808 cntrs.excessive_buffer_overrun_errors;
1809 ibp->n_vl15_dropped = 0;
1810 ibp->z_vl15_dropped = cntrs.vl15_dropped;
1811 }
1812
1813 return ret;
1814 }
1815
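/*
 * PortCountersExtended Set(): the same clear-by-rebaseline scheme,
 * applied to the 64-bit data/packet counters and to the
 * unicast/multicast packet counts from the pma snapshot.
 */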
1816 static int pma_set_portcounters_ext(struct ib_pma_mad *pmp,
1817 struct ib_device *ibdev, u8 port)
1818 {
1819 struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
1820 pmp->data;
1821 struct qib_ibport *ibp = to_iport(ibdev, port);
1822 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1823 u64 swords, rwords, spkts, rpkts, xwait;
1824 struct qib_pma_counters pma;
1825
1826 qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);
1827
1828 if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
1829 ibp->z_port_xmit_data = swords;
1830
1831 if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
1832 ibp->z_port_rcv_data = rwords;
1833
1834 if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
1835 ibp->z_port_xmit_packets = spkts;
1836
1837 if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
1838 ibp->z_port_rcv_packets = rpkts;
1839
1840 qib_snapshot_pmacounters(ibp, &pma);
1841
1842 if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
1843 ibp->z_unicast_xmit = pma.n_unicast_xmit;
1844
1845 if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
1846 ibp->z_unicast_rcv = pma.n_unicast_rcv;
1847
1848 if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
1849 ibp->z_multicast_xmit = pma.n_multicast_xmit;
1850
1851 if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
1852 ibp->z_multicast_rcv = pma.n_multicast_rcv;
1853
1854 return pma_get_portcounters_ext(pmp, ibdev, port);
1855 }
1856
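/*
 * Dispatch an incoming subnet-management MAD.  The reply is built in
 * place: out_mad starts as a copy of in_mad, the subn_get_* and
 * subn_set_* helpers overwrite its payload, and reply() flips the
 * method to GET_RESP.
 */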
1857 static int process_subn(struct ib_device *ibdev, int mad_flags,
1858 u8 port, const struct ib_mad *in_mad,
1859 struct ib_mad *out_mad)
1860 {
1861 struct ib_smp *smp = (struct ib_smp *)out_mad;
1862 struct qib_ibport *ibp = to_iport(ibdev, port);
1863 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1864 int ret;
1865
1866 *out_mad = *in_mad;
1867 if (smp->class_version != 1) {
1868 smp->status |= IB_SMP_UNSUP_VERSION;
1869 ret = reply(smp);
1870 goto bail;
1871 }
1872
1873 ret = check_mkey(ibp, smp, mad_flags);
1874 if (ret) {
1875 u32 port_num = be32_to_cpu(smp->attr_mod);
1876
1877 			/*
1878 			 * The M_Key check failed on the receiving port.  If this is a
1879 			 * Get/Set(PortInfo) aimed at another port, run the check
1880 			 * against that port as well so its M_Key violation counters
1881 			 * are also incremented when the key fails to match on *both*
1882 			 * ports.
1883 			 */
1884 if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
1885 (smp->method == IB_MGMT_METHOD_GET ||
1886 smp->method == IB_MGMT_METHOD_SET) &&
1887 port_num && port_num <= ibdev->phys_port_cnt &&
1888 port != port_num)
1889 (void) check_mkey(to_iport(ibdev, port_num), smp, 0);
1890 ret = IB_MAD_RESULT_FAILURE;
1891 goto bail;
1892 }
1893
1894 switch (smp->method) {
1895 case IB_MGMT_METHOD_GET:
1896 switch (smp->attr_id) {
1897 case IB_SMP_ATTR_NODE_DESC:
1898 ret = subn_get_nodedescription(smp, ibdev);
1899 goto bail;
1900 case IB_SMP_ATTR_NODE_INFO:
1901 ret = subn_get_nodeinfo(smp, ibdev, port);
1902 goto bail;
1903 case IB_SMP_ATTR_GUID_INFO:
1904 ret = subn_get_guidinfo(smp, ibdev, port);
1905 goto bail;
1906 case IB_SMP_ATTR_PORT_INFO:
1907 ret = subn_get_portinfo(smp, ibdev, port);
1908 goto bail;
1909 case IB_SMP_ATTR_PKEY_TABLE:
1910 ret = subn_get_pkeytable(smp, ibdev, port);
1911 goto bail;
1912 case IB_SMP_ATTR_SL_TO_VL_TABLE:
1913 ret = subn_get_sl_to_vl(smp, ibdev, port);
1914 goto bail;
1915 case IB_SMP_ATTR_VL_ARB_TABLE:
1916 ret = subn_get_vl_arb(smp, ibdev, port);
1917 goto bail;
1918 case IB_SMP_ATTR_SM_INFO:
1919 if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
1920 ret = IB_MAD_RESULT_SUCCESS |
1921 IB_MAD_RESULT_CONSUMED;
1922 goto bail;
1923 }
1924 if (ibp->port_cap_flags & IB_PORT_SM) {
1925 ret = IB_MAD_RESULT_SUCCESS;
1926 goto bail;
1927 }
1928 /* FALLTHROUGH */
1929 default:
1930 smp->status |= IB_SMP_UNSUP_METH_ATTR;
1931 ret = reply(smp);
1932 goto bail;
1933 }
1934
1935 case IB_MGMT_METHOD_SET:
1936 switch (smp->attr_id) {
1937 case IB_SMP_ATTR_GUID_INFO:
1938 ret = subn_set_guidinfo(smp, ibdev, port);
1939 goto bail;
1940 case IB_SMP_ATTR_PORT_INFO:
1941 ret = subn_set_portinfo(smp, ibdev, port);
1942 goto bail;
1943 case IB_SMP_ATTR_PKEY_TABLE:
1944 ret = subn_set_pkeytable(smp, ibdev, port);
1945 goto bail;
1946 case IB_SMP_ATTR_SL_TO_VL_TABLE:
1947 ret = subn_set_sl_to_vl(smp, ibdev, port);
1948 goto bail;
1949 case IB_SMP_ATTR_VL_ARB_TABLE:
1950 ret = subn_set_vl_arb(smp, ibdev, port);
1951 goto bail;
1952 case IB_SMP_ATTR_SM_INFO:
1953 if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
1954 ret = IB_MAD_RESULT_SUCCESS |
1955 IB_MAD_RESULT_CONSUMED;
1956 goto bail;
1957 }
1958 if (ibp->port_cap_flags & IB_PORT_SM) {
1959 ret = IB_MAD_RESULT_SUCCESS;
1960 goto bail;
1961 }
1962 /* FALLTHROUGH */
1963 default:
1964 smp->status |= IB_SMP_UNSUP_METH_ATTR;
1965 ret = reply(smp);
1966 goto bail;
1967 }
1968
1969 case IB_MGMT_METHOD_TRAP_REPRESS:
1970 if (smp->attr_id == IB_SMP_ATTR_NOTICE)
1971 ret = subn_trap_repress(smp, ibdev, port);
1972 else {
1973 smp->status |= IB_SMP_UNSUP_METH_ATTR;
1974 ret = reply(smp);
1975 }
1976 goto bail;
1977
1978 case IB_MGMT_METHOD_TRAP:
1979 case IB_MGMT_METHOD_REPORT:
1980 case IB_MGMT_METHOD_REPORT_RESP:
1981 case IB_MGMT_METHOD_GET_RESP:
1982 /*
1983 * The ib_mad module will call us to process responses
1984 * before checking for other consumers.
1985 * Just tell the caller to process it normally.
1986 */
1987 ret = IB_MAD_RESULT_SUCCESS;
1988 goto bail;
1989
1990 case IB_MGMT_METHOD_SEND:
1991 if (ib_get_smp_direction(smp) &&
1992 smp->attr_id == QIB_VENDOR_IPG) {
1993 ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PORT,
1994 smp->data[0]);
1995 ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
1996 } else
1997 ret = IB_MAD_RESULT_SUCCESS;
1998 goto bail;
1999
2000 default:
2001 smp->status |= IB_SMP_UNSUP_METHOD;
2002 ret = reply(smp);
2003 }
2004
2005 bail:
2006 return ret;
2007 }
2008
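/*
 * Dispatch an incoming performance-management MAD to the pma_get_*
 * and pma_set_* handlers above.  Unsupported methods and attributes
 * are bounced back with the corresponding status bits set.
 */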
2009 static int process_perf(struct ib_device *ibdev, u8 port,
2010 const struct ib_mad *in_mad,
2011 struct ib_mad *out_mad)
2012 {
2013 struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
2014 int ret;
2015
2016 *out_mad = *in_mad;
2017 if (pmp->mad_hdr.class_version != 1) {
2018 pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
2019 ret = reply((struct ib_smp *) pmp);
2020 goto bail;
2021 }
2022
2023 switch (pmp->mad_hdr.method) {
2024 case IB_MGMT_METHOD_GET:
2025 switch (pmp->mad_hdr.attr_id) {
2026 case IB_PMA_CLASS_PORT_INFO:
2027 ret = pma_get_classportinfo(pmp, ibdev);
2028 goto bail;
2029 case IB_PMA_PORT_SAMPLES_CONTROL:
2030 ret = pma_get_portsamplescontrol(pmp, ibdev, port);
2031 goto bail;
2032 case IB_PMA_PORT_SAMPLES_RESULT:
2033 ret = pma_get_portsamplesresult(pmp, ibdev, port);
2034 goto bail;
2035 case IB_PMA_PORT_SAMPLES_RESULT_EXT:
2036 ret = pma_get_portsamplesresult_ext(pmp, ibdev, port);
2037 goto bail;
2038 case IB_PMA_PORT_COUNTERS:
2039 ret = pma_get_portcounters(pmp, ibdev, port);
2040 goto bail;
2041 case IB_PMA_PORT_COUNTERS_EXT:
2042 ret = pma_get_portcounters_ext(pmp, ibdev, port);
2043 goto bail;
2044 case IB_PMA_PORT_COUNTERS_CONG:
2045 ret = pma_get_portcounters_cong(pmp, ibdev, port);
2046 goto bail;
2047 default:
2048 pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
2049 ret = reply((struct ib_smp *) pmp);
2050 goto bail;
2051 }
2052
2053 case IB_MGMT_METHOD_SET:
2054 switch (pmp->mad_hdr.attr_id) {
2055 case IB_PMA_PORT_SAMPLES_CONTROL:
2056 ret = pma_set_portsamplescontrol(pmp, ibdev, port);
2057 goto bail;
2058 case IB_PMA_PORT_COUNTERS:
2059 ret = pma_set_portcounters(pmp, ibdev, port);
2060 goto bail;
2061 case IB_PMA_PORT_COUNTERS_EXT:
2062 ret = pma_set_portcounters_ext(pmp, ibdev, port);
2063 goto bail;
2064 case IB_PMA_PORT_COUNTERS_CONG:
2065 ret = pma_set_portcounters_cong(pmp, ibdev, port);
2066 goto bail;
2067 default:
2068 pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
2069 ret = reply((struct ib_smp *) pmp);
2070 goto bail;
2071 }
2072
2073 case IB_MGMT_METHOD_TRAP:
2074 case IB_MGMT_METHOD_GET_RESP:
2075 /*
2076 * The ib_mad module will call us to process responses
2077 * before checking for other consumers.
2078 * Just tell the caller to process it normally.
2079 */
2080 ret = IB_MAD_RESULT_SUCCESS;
2081 goto bail;
2082
2083 default:
2084 pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
2085 ret = reply((struct ib_smp *) pmp);
2086 }
2087
2088 bail:
2089 return ret;
2090 }
2091
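/*
 * Congestion-control ClassPortInfo.  resp_time_value uses the IB
 * exponent encoding (4.096 usec * 2^N), so N = 18 advertises an
 * expected response time of about 1.07 seconds.
 */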
2092 static int cc_get_classportinfo(struct ib_cc_mad *ccp,
2093 struct ib_device *ibdev)
2094 {
2095 struct ib_cc_classportinfo_attr *p =
2096 (struct ib_cc_classportinfo_attr *)ccp->mgmt_data;
2097
2098 memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
2099
2100 p->base_version = 1;
2101 p->class_version = 1;
2102 p->cap_mask = 0;
2103
2104 /*
2105 	 * Expected response time is 4.096 usec * 2^18 == 1.073741824 sec.
2106 */
2107 p->resp_time_value = 18;
2108
2109 return reply((struct ib_smp *) ccp);
2110 }
2111
2112 static int cc_get_congestion_info(struct ib_cc_mad *ccp,
2113 struct ib_device *ibdev, u8 port)
2114 {
2115 struct ib_cc_info_attr *p =
2116 (struct ib_cc_info_attr *)ccp->mgmt_data;
2117 struct qib_ibport *ibp = to_iport(ibdev, port);
2118 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
2119
2120 memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
2121
2122 p->congestion_info = 0;
2123 p->control_table_cap = ppd->cc_max_table_entries;
2124
2125 return reply((struct ib_smp *) ccp);
2126 }
2127
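/*
 * Return the per-SL congestion settings.  The shadow copy is read
 * under cc_shadow_lock so a concurrent table update cannot hand
 * back a half-written snapshot.
 */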
2128 static int cc_get_congestion_setting(struct ib_cc_mad *ccp,
2129 struct ib_device *ibdev, u8 port)
2130 {
2131 int i;
2132 struct ib_cc_congestion_setting_attr *p =
2133 (struct ib_cc_congestion_setting_attr *)ccp->mgmt_data;
2134 struct qib_ibport *ibp = to_iport(ibdev, port);
2135 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
2136 struct ib_cc_congestion_entry_shadow *entries;
2137
2138 memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
2139
2140 spin_lock(&ppd->cc_shadow_lock);
2141
2142 entries = ppd->congestion_entries_shadow->entries;
2143 p->port_control = cpu_to_be16(
2144 ppd->congestion_entries_shadow->port_control);
2145 p->control_map = cpu_to_be16(
2146 ppd->congestion_entries_shadow->control_map);
2147 for (i = 0; i < IB_CC_CCS_ENTRIES; i++) {
2148 p->entries[i].ccti_increase = entries[i].ccti_increase;
2149 p->entries[i].ccti_timer = cpu_to_be16(entries[i].ccti_timer);
2150 p->entries[i].trigger_threshold = entries[i].trigger_threshold;
2151 p->entries[i].ccti_min = entries[i].ccti_min;
2152 }
2153
2154 spin_unlock(&ppd->cc_shadow_lock);
2155
2156 return reply((struct ib_smp *) ccp);
2157 }
2158
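/*
 * Return one IB_CCT_ENTRIES-sized block of the congestion-control
 * table.  The block index arrives in attr_mod; requests past the
 * last populated block (or past the table capacity) fail.
 * ccti_limit reports the index of the last valid entry.
 */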
2159 static int cc_get_congestion_control_table(struct ib_cc_mad *ccp,
2160 struct ib_device *ibdev, u8 port)
2161 {
2162 struct ib_cc_table_attr *p =
2163 (struct ib_cc_table_attr *)ccp->mgmt_data;
2164 struct qib_ibport *ibp = to_iport(ibdev, port);
2165 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
2166 u32 cct_block_index = be32_to_cpu(ccp->attr_mod);
2167 u32 max_cct_block;
2168 u32 cct_entry;
2169 struct ib_cc_table_entry_shadow *entries;
2170 int i;
2171
2172 /* Is the table index more than what is supported? */
2173 if (cct_block_index > IB_CC_TABLE_CAP_DEFAULT - 1)
2174 goto bail;
2175
2176 memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
2177
2178 spin_lock(&ppd->cc_shadow_lock);
2179
2180 max_cct_block =
2181 		(ppd->ccti_entries_shadow->ccti_last_entry + 1) / IB_CCT_ENTRIES;
2182 max_cct_block = max_cct_block ? max_cct_block - 1 : 0;
2183
2184 if (cct_block_index > max_cct_block) {
2185 spin_unlock(&ppd->cc_shadow_lock);
2186 goto bail;
2187 }
2188
2189 ccp->attr_mod = cpu_to_be32(cct_block_index);
2190
2191 cct_entry = IB_CCT_ENTRIES * (cct_block_index + 1);
2192
2193 cct_entry--;
2194
2195 p->ccti_limit = cpu_to_be16(cct_entry);
2196
2197 entries = &ppd->ccti_entries_shadow->
2198 entries[IB_CCT_ENTRIES * cct_block_index];
2199 cct_entry %= IB_CCT_ENTRIES;
2200
2201 for (i = 0; i <= cct_entry; i++)
2202 p->ccti_entries[i].entry = cpu_to_be16(entries[i].entry);
2203
2204 spin_unlock(&ppd->cc_shadow_lock);
2205
2206 return reply((struct ib_smp *) ccp);
2207
2208 bail:
2209 return reply_failure((struct ib_smp *) ccp);
2210 }
2211
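/*
 * Apply new per-SL congestion settings.  Note that the live
 * congestion_entries array is updated without cc_shadow_lock; the
 * shadow copy is only refreshed when the congestion-control table
 * itself is written below.
 */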
2212 static int cc_set_congestion_setting(struct ib_cc_mad *ccp,
2213 struct ib_device *ibdev, u8 port)
2214 {
2215 struct ib_cc_congestion_setting_attr *p =
2216 (struct ib_cc_congestion_setting_attr *)ccp->mgmt_data;
2217 struct qib_ibport *ibp = to_iport(ibdev, port);
2218 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
2219 int i;
2220
2221 ppd->cc_sl_control_map = be16_to_cpu(p->control_map);
2222
2223 for (i = 0; i < IB_CC_CCS_ENTRIES; i++) {
2224 ppd->congestion_entries[i].ccti_increase =
2225 p->entries[i].ccti_increase;
2226
2227 ppd->congestion_entries[i].ccti_timer =
2228 be16_to_cpu(p->entries[i].ccti_timer);
2229
2230 ppd->congestion_entries[i].trigger_threshold =
2231 p->entries[i].trigger_threshold;
2232
2233 ppd->congestion_entries[i].ccti_min =
2234 p->entries[i].ccti_min;
2235 }
2236
2237 return reply((struct ib_smp *) ccp);
2238 }
2239
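/*
 * Install one block of the congestion-control table.  Multi-block
 * tables arrive as a sequence of Set() MADs; ccti_limit is the
 * cumulative index of the last valid entry, so a value below
 * IB_CCT_ENTRIES marks the first block of a new sequence and resets
 * the running entry count.  The shadow copies used by the Get()
 * handlers are refreshed under cc_shadow_lock once the block is in.
 */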
2240 static int cc_set_congestion_control_table(struct ib_cc_mad *ccp,
2241 struct ib_device *ibdev, u8 port)
2242 {
2243 struct ib_cc_table_attr *p =
2244 (struct ib_cc_table_attr *)ccp->mgmt_data;
2245 struct qib_ibport *ibp = to_iport(ibdev, port);
2246 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
2247 u32 cct_block_index = be32_to_cpu(ccp->attr_mod);
2248 u32 cct_entry;
2249 struct ib_cc_table_entry_shadow *entries;
2250 int i;
2251
2252 /* Is the table index more than what is supported? */
2253 if (cct_block_index > IB_CC_TABLE_CAP_DEFAULT - 1)
2254 goto bail;
2255
2256 /* If this packet is the first in the sequence then
2257 * zero the total table entry count.
2258 */
2259 if (be16_to_cpu(p->ccti_limit) < IB_CCT_ENTRIES)
2260 ppd->total_cct_entry = 0;
2261
2262 	cct_entry = be16_to_cpu(p->ccti_limit) % IB_CCT_ENTRIES;
2263
2264 	/* cct_entry is now the index within this block: 0 to IB_CCT_ENTRIES - 1 */
2265 ppd->total_cct_entry += (cct_entry + 1);
2266
2267 if (ppd->total_cct_entry > ppd->cc_supported_table_entries)
2268 goto bail;
2269
2270 ppd->ccti_limit = be16_to_cpu(p->ccti_limit);
2271
2272 entries = ppd->ccti_entries + (IB_CCT_ENTRIES * cct_block_index);
2273
2274 for (i = 0; i <= cct_entry; i++)
2275 entries[i].entry = be16_to_cpu(p->ccti_entries[i].entry);
2276
2277 spin_lock(&ppd->cc_shadow_lock);
2278
2279 ppd->ccti_entries_shadow->ccti_last_entry = ppd->total_cct_entry - 1;
2280 memcpy(ppd->ccti_entries_shadow->entries, ppd->ccti_entries,
2281 (ppd->total_cct_entry * sizeof(struct ib_cc_table_entry)));
2282
2283 ppd->congestion_entries_shadow->port_control = IB_CC_CCS_PC_SL_BASED;
2284 ppd->congestion_entries_shadow->control_map = ppd->cc_sl_control_map;
2285 memcpy(ppd->congestion_entries_shadow->entries, ppd->congestion_entries,
2286 IB_CC_CCS_ENTRIES * sizeof(struct ib_cc_congestion_entry));
2287
2288 spin_unlock(&ppd->cc_shadow_lock);
2289
2290 return reply((struct ib_smp *) ccp);
2291
2292 bail:
2293 return reply_failure((struct ib_smp *) ccp);
2294 }
2295
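/*
 * CC_Key checking is not implemented: every congestion-control MAD
 * is accepted.  The stub appears to be a placeholder that keeps the
 * call site in process_cc() ready for a real check.
 */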
2296 static int check_cc_key(struct qib_ibport *ibp,
2297 struct ib_cc_mad *ccp, int mad_flags)
2298 {
2299 return 0;
2300 }
2301
2302 static int process_cc(struct ib_device *ibdev, int mad_flags,
2303 u8 port, const struct ib_mad *in_mad,
2304 struct ib_mad *out_mad)
2305 {
2306 struct ib_cc_mad *ccp = (struct ib_cc_mad *)out_mad;
2307 struct qib_ibport *ibp = to_iport(ibdev, port);
2308 int ret;
2309
2310 *out_mad = *in_mad;
2311
2312 if (ccp->class_version != 2) {
2313 ccp->status |= IB_SMP_UNSUP_VERSION;
2314 ret = reply((struct ib_smp *)ccp);
2315 goto bail;
2316 }
2317
2318 ret = check_cc_key(ibp, ccp, mad_flags);
2319 if (ret)
2320 goto bail;
2321
2322 switch (ccp->method) {
2323 case IB_MGMT_METHOD_GET:
2324 switch (ccp->attr_id) {
2325 case IB_CC_ATTR_CLASSPORTINFO:
2326 ret = cc_get_classportinfo(ccp, ibdev);
2327 goto bail;
2328
2329 case IB_CC_ATTR_CONGESTION_INFO:
2330 ret = cc_get_congestion_info(ccp, ibdev, port);
2331 goto bail;
2332
2333 case IB_CC_ATTR_CA_CONGESTION_SETTING:
2334 ret = cc_get_congestion_setting(ccp, ibdev, port);
2335 goto bail;
2336
2337 case IB_CC_ATTR_CONGESTION_CONTROL_TABLE:
2338 ret = cc_get_congestion_control_table(ccp, ibdev, port);
2339 goto bail;
2340
2341
2342 default:
2343 ccp->status |= IB_SMP_UNSUP_METH_ATTR;
2344 ret = reply((struct ib_smp *) ccp);
2345 goto bail;
2346 }
2347
2348 case IB_MGMT_METHOD_SET:
2349 switch (ccp->attr_id) {
2350 case IB_CC_ATTR_CA_CONGESTION_SETTING:
2351 ret = cc_set_congestion_setting(ccp, ibdev, port);
2352 goto bail;
2353
2354 case IB_CC_ATTR_CONGESTION_CONTROL_TABLE:
2355 ret = cc_set_congestion_control_table(ccp, ibdev, port);
2356 goto bail;
2357
2358
2359 default:
2360 ccp->status |= IB_SMP_UNSUP_METH_ATTR;
2361 ret = reply((struct ib_smp *) ccp);
2362 goto bail;
2363 }
2364
2365 case IB_MGMT_METHOD_GET_RESP:
2366 /*
2367 * The ib_mad module will call us to process responses
2368 * before checking for other consumers.
2369 * Just tell the caller to process it normally.
2370 */
2371 ret = IB_MAD_RESULT_SUCCESS;
2372 goto bail;
2373
2374 case IB_MGMT_METHOD_TRAP:
2375 default:
2376 ccp->status |= IB_SMP_UNSUP_METHOD;
2377 ret = reply((struct ib_smp *) ccp);
2378 }
2379
2380 bail:
2381 return ret;
2382 }
2383
2384 /**
2385 * qib_process_mad - process an incoming MAD packet
2386 * @ibdev: the infiniband device this packet came in on
2387 * @mad_flags: MAD flags
2388 * @port: the port number this packet came in on
2389 * @in_wc: the work completion entry for this packet
2390 * @in_grh: the global route header for this packet
2391 * @in: the incoming MAD; @in_mad_size: size of the incoming MAD
2392 * @out: the outgoing MAD reply; @out_mad_size: reply size (in/out); @out_mad_pkey_index: reply P_Key index
2393 *
2394 * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
2395 * interested in processing.
2396 *
2397 * Note that the verbs framework has already done the MAD sanity checks,
2398 * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
2399 * MADs.
2400 *
2401 * This is called by the ib_mad module.
2402 */
2403 int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
2404 const struct ib_wc *in_wc, const struct ib_grh *in_grh,
2405 const struct ib_mad_hdr *in, size_t in_mad_size,
2406 struct ib_mad_hdr *out, size_t *out_mad_size,
2407 u16 *out_mad_pkey_index)
2408 {
2409 int ret;
2410 struct qib_ibport *ibp = to_iport(ibdev, port);
2411 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
2412 const struct ib_mad *in_mad = (const struct ib_mad *)in;
2413 struct ib_mad *out_mad = (struct ib_mad *)out;
2414
2415 BUG_ON(in_mad_size != sizeof(*in_mad) ||
2416 *out_mad_size != sizeof(*out_mad));
2417
2418 switch (in_mad->mad_hdr.mgmt_class) {
2419 case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
2420 case IB_MGMT_CLASS_SUBN_LID_ROUTED:
2421 ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);
2422 goto bail;
2423
2424 case IB_MGMT_CLASS_PERF_MGMT:
2425 ret = process_perf(ibdev, port, in_mad, out_mad);
2426 goto bail;
2427
2428 case IB_MGMT_CLASS_CONG_MGMT:
2429 if (!ppd->congestion_entries_shadow ||
2430 !qib_cc_table_size) {
2431 ret = IB_MAD_RESULT_SUCCESS;
2432 goto bail;
2433 }
2434 ret = process_cc(ibdev, mad_flags, port, in_mad, out_mad);
2435 goto bail;
2436
2437 default:
2438 ret = IB_MAD_RESULT_SUCCESS;
2439 }
2440
2441 bail:
2442 return ret;
2443 }
2444
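/* MAD send completion: release the send buffer now that the MAD
 * layer has finished with it.
 */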
2445 static void send_handler(struct ib_mad_agent *agent,
2446 struct ib_mad_send_wc *mad_send_wc)
2447 {
2448 ib_free_send_mad(mad_send_wc->send_buf);
2449 }
2450
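/*
 * Timer callback, re-armed every HZ, that keeps the transmit-wait
 * congestion statistics fresh: once a hardware sample completes,
 * the counters are cached and the sample interval is restarted.
 */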
2451 static void xmit_wait_timer_func(unsigned long opaque)
2452 {
2453 struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
2454 struct qib_devdata *dd = dd_from_ppd(ppd);
2455 unsigned long flags;
2456 u8 status;
2457
2458 spin_lock_irqsave(&ppd->ibport_data.lock, flags);
2459 if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_SAMPLE) {
2460 status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
2461 if (status == IB_PMA_SAMPLE_STATUS_DONE) {
2462 /* save counter cache */
2463 cache_hw_sample_counters(ppd);
2464 ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
2465 } else
2466 goto done;
2467 }
2468 ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd);
2469 dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0);
2470 done:
2471 spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
2472 mod_timer(&ppd->cong_stats.timer, jiffies + HZ);
2473 }
2474
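/*
 * Register one SMI MAD agent per physical port (used for sending
 * traps) and start each port's congestion-statistics timer.  On
 * failure, any agents already registered are unwound.
 */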
2475 int qib_create_agents(struct qib_ibdev *dev)
2476 {
2477 struct qib_devdata *dd = dd_from_dev(dev);
2478 struct ib_mad_agent *agent;
2479 struct qib_ibport *ibp;
2480 int p;
2481 int ret;
2482
2483 for (p = 0; p < dd->num_pports; p++) {
2484 ibp = &dd->pport[p].ibport_data;
2485 agent = ib_register_mad_agent(&dev->ibdev, p + 1, IB_QPT_SMI,
2486 NULL, 0, send_handler,
2487 NULL, NULL, 0);
2488 if (IS_ERR(agent)) {
2489 ret = PTR_ERR(agent);
2490 goto err;
2491 }
2492
2493 /* Initialize xmit_wait structure */
2494 dd->pport[p].cong_stats.counter = 0;
2495 init_timer(&dd->pport[p].cong_stats.timer);
2496 dd->pport[p].cong_stats.timer.function = xmit_wait_timer_func;
2497 dd->pport[p].cong_stats.timer.data =
2498 (unsigned long)(&dd->pport[p]);
2499 dd->pport[p].cong_stats.timer.expires = 0;
2500 add_timer(&dd->pport[p].cong_stats.timer);
2501
2502 ibp->send_agent = agent;
2503 }
2504
2505 return 0;
2506
2507 err:
2508 for (p = 0; p < dd->num_pports; p++) {
2509 ibp = &dd->pport[p].ibport_data;
2510 if (ibp->send_agent) {
2511 agent = ibp->send_agent;
2512 ibp->send_agent = NULL;
2513 ib_unregister_mad_agent(agent);
2514 }
2515 }
2516
2517 return ret;
2518 }
2519
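/*
 * Mirror of qib_create_agents(): unregister the per-port MAD
 * agents, drop any cached SM address handle, and stop the
 * congestion timer if it was ever set up (timer.data doubles as
 * the "initialized" flag).
 */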
2520 void qib_free_agents(struct qib_ibdev *dev)
2521 {
2522 struct qib_devdata *dd = dd_from_dev(dev);
2523 struct ib_mad_agent *agent;
2524 struct qib_ibport *ibp;
2525 int p;
2526
2527 for (p = 0; p < dd->num_pports; p++) {
2528 ibp = &dd->pport[p].ibport_data;
2529 if (ibp->send_agent) {
2530 agent = ibp->send_agent;
2531 ibp->send_agent = NULL;
2532 ib_unregister_mad_agent(agent);
2533 }
2534 if (ibp->sm_ah) {
2535 ib_destroy_ah(&ibp->sm_ah->ibah);
2536 ibp->sm_ah = NULL;
2537 }
2538 if (dd->pport[p].cong_stats.timer.data)
2539 del_timer_sync(&dd->pport[p].cong_stats.timer);
2540 }
2541 }