/*
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>

#include "hfi.h"
#include "device.h"
#include "common.h"
#include "trace.h"
#include "mad.h"
#include "sdma.h"
#include "debugfs.h"
#include "verbs.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

/*
 * min buffers we want to have per context, after those the driver
 * reserves for itself
 */
#define HFI1_MIN_USER_CTXT_BUFCNT 7

#define HFI1_MIN_HDRQ_EGRBUF_CNT 2
#define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */
#define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */

/*
 * Number of user receive contexts we are configured to use (to allow for more
 * pio buffers per ctxt, etc.).  Zero means use one user context per CPU.
 */
uint num_rcv_contexts;
module_param_named(num_rcv_contexts, num_rcv_contexts, uint, S_IRUGO);
MODULE_PARM_DESC(
	num_rcv_contexts, "Set max number of user receive contexts to use");

u8 krcvqs[RXE_NUM_DATA_VL];
int krcvqsset;
module_param_array(krcvqs, byte, &krcvqsset, S_IRUGO);
MODULE_PARM_DESC(krcvqs, "Array of the number of kernel receive queues by VL");

/* computed based on above array */
unsigned n_krcvqs;

static unsigned hfi1_rcvarr_split = 25;
module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO);
MODULE_PARM_DESC(rcvarr_split, "Percent of context's RcvArray entries used for Eager buffers");

static uint eager_buffer_size = (2 << 20); /* 2MB */
module_param(eager_buffer_size, uint, S_IRUGO);
MODULE_PARM_DESC(eager_buffer_size, "Size of the eager buffers, default: 2MB");

static uint rcvhdrcnt = 2048; /* 2x the max eager buffer count */
module_param_named(rcvhdrcnt, rcvhdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrcnt, "Receive header queue count (default 2048)");

static uint hfi1_hdrq_entsize = 32;
module_param_named(hdrq_entsize, hfi1_hdrq_entsize, uint, S_IRUGO);
MODULE_PARM_DESC(hdrq_entsize, "Size of header queue entries: 2 - 8B, 16 - 64B (default), 32 - 128B");

unsigned int user_credit_return_threshold = 33;	/* default is 33% */
module_param(user_credit_return_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits pass this many blocks (in percent of allocated blocks, 0 is off)");

static inline u64 encode_rcv_header_entry_size(u16);

static struct idr hfi1_unit_table;
u32 hfi1_cpulist_count;
unsigned long *hfi1_cpulist;

/*
 * Common code for creating the receive context array.
 */
int hfi1_create_ctxts(struct hfi1_devdata *dd)
{
	unsigned i;
	int ret;
	int local_node_id = pcibus_to_node(dd->pcidev->bus);

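	/*
	 * pcibus_to_node() returns a negative value (NUMA_NO_NODE) when the
	 * bus-to-node mapping is unknown; fall back to the node of the CPU
	 * running this probe.
	 */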
	if (local_node_id < 0)
		local_node_id = numa_node_id();
	dd->assigned_node_id = local_node_id;

	dd->rcd = kcalloc(dd->num_rcv_contexts, sizeof(*dd->rcd), GFP_KERNEL);
	if (!dd->rcd)
		goto nomem;

	/* create one or more kernel contexts */
	for (i = 0; i < dd->first_user_ctxt; ++i) {
		struct hfi1_pportdata *ppd;
		struct hfi1_ctxtdata *rcd;

		ppd = dd->pport + (i % dd->num_pports);
		rcd = hfi1_create_ctxtdata(ppd, i);
		if (!rcd) {
			dd_dev_err(dd,
				   "Unable to allocate kernel receive context, failing\n");
			goto nomem;
		}
		/*
		 * Set up the kernel context flags here and now because they
		 * use default values for all receive side memories.  User
		 * contexts will be handled as they are created.
		 */
		rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
			HFI1_CAP_KGET(NODROP_RHQ_FULL) |
			HFI1_CAP_KGET(NODROP_EGR_FULL) |
			HFI1_CAP_KGET(DMA_RTAIL);
		rcd->seq_cnt = 1;

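		/* each kernel receive context gets its own ACK send context */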
		rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
		if (!rcd->sc) {
			dd_dev_err(dd,
				   "Unable to allocate kernel send context, failing\n");
			dd->rcd[rcd->ctxt] = NULL;
			hfi1_free_ctxtdata(dd, rcd);
			goto nomem;
		}

		ret = hfi1_init_ctxt(rcd->sc);
		if (ret < 0) {
			dd_dev_err(dd,
				   "Failed to setup kernel receive context, failing\n");
			sc_free(rcd->sc);
			dd->rcd[rcd->ctxt] = NULL;
			hfi1_free_ctxtdata(dd, rcd);
			ret = -EFAULT;
			goto bail;
		}
	}

	return 0;
nomem:
	ret = -ENOMEM;
bail:
	kfree(dd->rcd);
	dd->rcd = NULL;
	return ret;
}

/*
 * Common code for user and kernel context setup.
 */
struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct hfi1_ctxtdata *rcd;
	unsigned kctxt_ngroups = 0;
	u32 base;

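	/*
	 * If there are more leftover RcvArray groups than user contexts to
	 * absorb them, the surplus goes to the kernel contexts, one extra
	 * group per context starting from context 0.
	 */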
	if (dd->rcv_entries.nctxt_extra >
	    dd->num_rcv_contexts - dd->first_user_ctxt)
		kctxt_ngroups = (dd->rcv_entries.nctxt_extra -
				 (dd->num_rcv_contexts - dd->first_user_ctxt));
	rcd = kzalloc(sizeof(*rcd), GFP_KERNEL);
	if (rcd) {
		u32 rcvtids, max_entries;

		hfi1_cdbg(PROC, "setting up context %u\n", ctxt);

		INIT_LIST_HEAD(&rcd->qp_wait_list);
		rcd->ppd = ppd;
		rcd->dd = dd;
		rcd->cnt = 1;
		rcd->ctxt = ctxt;
		dd->rcd[ctxt] = rcd;
		rcd->numa_id = numa_node_id();
		rcd->rcv_array_groups = dd->rcv_entries.ngroups;

		spin_lock_init(&rcd->exp_lock);

		/*
		 * Calculate the context's RcvArray entry starting point.
		 * We do this here because we have to take into account all
		 * the RcvArray entries that previous contexts would have
		 * taken, and we have to account for any extra groups
		 * assigned to the kernel or user contexts.
		 */
		if (ctxt < dd->first_user_ctxt) {
			if (ctxt < kctxt_ngroups) {
				base = ctxt * (dd->rcv_entries.ngroups + 1);
				rcd->rcv_array_groups++;
			} else
				base = kctxt_ngroups +
					(ctxt * dd->rcv_entries.ngroups);
		} else {
			u16 ct = ctxt - dd->first_user_ctxt;

			base = ((dd->n_krcv_queues * dd->rcv_entries.ngroups) +
				kctxt_ngroups);
			if (ct < dd->rcv_entries.nctxt_extra) {
				base += ct * (dd->rcv_entries.ngroups + 1);
				rcd->rcv_array_groups++;
			} else
				base += dd->rcv_entries.nctxt_extra +
					(ct * dd->rcv_entries.ngroups);
		}
		rcd->eager_base = base * dd->rcv_entries.group_size;

		/* Validate and initialize Rcv Hdr Q variables */
		if (rcvhdrcnt % HDRQ_INCREMENT) {
			dd_dev_err(dd,
				   "ctxt%u: header queue count %d must be divisible by %d\n",
				   rcd->ctxt, rcvhdrcnt, HDRQ_INCREMENT);
			goto bail;
		}
		rcd->rcvhdrq_cnt = rcvhdrcnt;
		rcd->rcvhdrqentsize = hfi1_hdrq_entsize;
		/*
		 * Simple Eager buffer allocation: we have already
		 * pre-allocated the number of RcvArray entry groups.
		 * Each ctxtdata structure holds the number of groups
		 * for that context.
		 *
		 * To follow CSR requirements and maintain cacheline
		 * alignment, make sure all sizes and bases are multiples
		 * of group_size.
		 *
		 * The expected entry count is what is left after assigning
		 * eager.
		 */
		max_entries = rcd->rcv_array_groups *
			dd->rcv_entries.group_size;
		rcvtids = ((max_entries * hfi1_rcvarr_split) / 100);
		rcd->egrbufs.count = round_down(rcvtids,
						dd->rcv_entries.group_size);
		if (rcd->egrbufs.count > MAX_EAGER_ENTRIES) {
			dd_dev_err(dd, "ctxt%u: requested too many RcvArray entries.\n",
				   rcd->ctxt);
			rcd->egrbufs.count = MAX_EAGER_ENTRIES;
		}
		hfi1_cdbg(PROC,
			  "ctxt%u: max Eager buffer RcvArray entries: %u\n",
			  rcd->ctxt, rcd->egrbufs.count);

		/*
		 * Allocate array that will hold the eager buffer accounting
		 * data.
		 * This will allocate the maximum possible buffer count based
		 * on the value of the RcvArray split parameter.
		 * The resulting value will be rounded down to the closest
		 * multiple of dd->rcv_entries.group_size.
		 */
		rcd->egrbufs.buffers = kcalloc(rcd->egrbufs.count,
					       sizeof(*rcd->egrbufs.buffers),
					       GFP_KERNEL);
		if (!rcd->egrbufs.buffers)
			goto bail;
		rcd->egrbufs.rcvtids = kcalloc(rcd->egrbufs.count,
					       sizeof(*rcd->egrbufs.rcvtids),
					       GFP_KERNEL);
		if (!rcd->egrbufs.rcvtids)
			goto bail;
		rcd->egrbufs.size = eager_buffer_size;
		/*
		 * The size of the buffers programmed into the RcvArray
		 * entries needs to be big enough to handle the highest
		 * MTU supported.
		 */
		if (rcd->egrbufs.size < hfi1_max_mtu) {
			rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu);
			hfi1_cdbg(PROC,
				  "ctxt%u: eager bufs size too small. Adjusting to %zu\n",
				  rcd->ctxt, rcd->egrbufs.size);
		}
		rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;

		if (ctxt < dd->first_user_ctxt) { /* N/A for PSM contexts */
			rcd->opstats = kzalloc(sizeof(*rcd->opstats),
					       GFP_KERNEL);
			if (!rcd->opstats)
				goto bail;
		}
	}
	return rcd;
bail:
	kfree(rcd->opstats);
	kfree(rcd->egrbufs.rcvtids);
	kfree(rcd->egrbufs.buffers);
	kfree(rcd);
	return NULL;
}

/*
 * Convert a receive header entry size to the encoding used in the CSR.
 *
 * Return zero if the given size is invalid.
 */
static inline u64 encode_rcv_header_entry_size(u16 size)
{
	/* there are only 3 valid receive header entry sizes */
	if (size == 2)
		return 1;
	if (size == 16)
		return 2;
	if (size == 32)
		return 4;
	return 0; /* invalid */
}

/*
 * Select the largest ccti value over all SLs to determine the intra-
 * packet gap for the link.
 *
 * called with cca_timer_lock held (to protect access to cca_timer
 * array), and rcu_read_lock() (to protect access to cc_state).
 */
void set_link_ipg(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct cc_state *cc_state;
	int i;
	u16 cce, ccti_limit, max_ccti = 0;
	u16 shift, mult;
	u64 src;
	u32 current_egress_rate; /* Mbits /sec */
	u32 max_pkt_time;
	/*
	 * max_pkt_time is the maximum packet egress time in units
	 * of the fabric clock period 1/(805 MHz).
	 */

	cc_state = get_cc_state(ppd);

	if (cc_state == NULL)
		/*
		 * This should _never_ happen - rcu_read_lock() is held,
		 * and set_link_ipg() should not be called if cc_state
		 * is NULL.
		 */
		return;

	for (i = 0; i < OPA_MAX_SLS; i++) {
		u16 ccti = ppd->cca_timer[i].ccti;

		if (ccti > max_ccti)
			max_ccti = ccti;
	}

	ccti_limit = cc_state->cct.ccti_limit;
	if (max_ccti > ccti_limit)
		max_ccti = ccti_limit;

	cce = cc_state->cct.entries[max_ccti].entry;
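	/* a CCT entry packs a 2-bit shift (bits 15:14) and a 14-bit multiplier */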
	shift = (cce & 0xc000) >> 14;
	mult = (cce & 0x3fff);

	current_egress_rate = active_egress_rate(ppd);

	max_pkt_time = egress_cycles(ppd->ibmaxlen, current_egress_rate);

	src = (max_pkt_time >> shift) * mult;

	src &= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SMASK;
	src <<= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SHIFT;

	write_csr(dd, SEND_STATIC_RATE_CONTROL, src);
}

static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
{
	struct cca_timer *cca_timer;
	struct hfi1_pportdata *ppd;
	int sl;
	u16 ccti, ccti_timer, ccti_min;
	struct cc_state *cc_state;
	unsigned long flags;

	cca_timer = container_of(t, struct cca_timer, hrtimer);
	ppd = cca_timer->ppd;
	sl = cca_timer->sl;

	rcu_read_lock();

	cc_state = get_cc_state(ppd);

	if (cc_state == NULL) {
		rcu_read_unlock();
		return HRTIMER_NORESTART;
	}

	/*
	 * 1) decrement ccti for SL
	 * 2) calculate IPG for link (set_link_ipg())
	 * 3) restart timer, unless ccti is at min value
	 */

	ccti_min = cc_state->cong_setting.entries[sl].ccti_min;
	ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;

	spin_lock_irqsave(&ppd->cca_timer_lock, flags);

	ccti = cca_timer->ccti;

	if (ccti > ccti_min) {
		cca_timer->ccti--;
		set_link_ipg(ppd);
	}

	spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);

	rcu_read_unlock();

	if (ccti > ccti_min) {
		unsigned long nsec = 1024 * ccti_timer;
		/* ccti_timer is in units of 1.024 usec */
		hrtimer_forward_now(t, ns_to_ktime(nsec));
		return HRTIMER_RESTART;
	}
	return HRTIMER_NORESTART;
}

/*
 * Common code for initializing the physical port structure.
 */
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
			 struct hfi1_devdata *dd, u8 hw_pidx, u8 port)
{
	int i, size;
	uint default_pkey_idx;

	ppd->dd = dd;
	ppd->hw_pidx = hw_pidx;
	ppd->port = port; /* IB port number, not index */

	default_pkey_idx = 1;

	ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY;
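	/* in loopback, plant a fake data pkey in slot 0 (!default_pkey_idx) */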
	if (loopback) {
		hfi1_early_err(&pdev->dev,
			       "Faking data partition 0x8001 in idx %u\n",
			       !default_pkey_idx);
		ppd->pkeys[!default_pkey_idx] = 0x8001;
	}

	INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
	INIT_WORK(&ppd->link_up_work, handle_link_up);
	INIT_WORK(&ppd->link_down_work, handle_link_down);
	INIT_WORK(&ppd->freeze_work, handle_freeze);
	INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
	INIT_WORK(&ppd->sma_message_work, handle_sma_message);
	INIT_WORK(&ppd->link_bounce_work, handle_link_bounce);
	mutex_init(&ppd->hls_lock);
	spin_lock_init(&ppd->sdma_alllock);
	spin_lock_init(&ppd->qsfp_info.qsfp_lock);

	ppd->sm_trap_qp = 0x0;
	ppd->sa_qp = 0x1;

	ppd->hfi1_wq = NULL;

	spin_lock_init(&ppd->cca_timer_lock);

	for (i = 0; i < OPA_MAX_SLS; i++) {
		hrtimer_init(&ppd->cca_timer[i].hrtimer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		ppd->cca_timer[i].ppd = ppd;
		ppd->cca_timer[i].sl = i;
		ppd->cca_timer[i].ccti = 0;
		ppd->cca_timer[i].hrtimer.function = cca_timer_fn;
	}

	ppd->cc_max_table_entries = IB_CC_TABLE_CAP_DEFAULT;

	spin_lock_init(&ppd->cc_state_lock);
	spin_lock_init(&ppd->cc_log_lock);
	size = sizeof(struct cc_state);
	RCU_INIT_POINTER(ppd->cc_state, kzalloc(size, GFP_KERNEL));
	if (!rcu_dereference(ppd->cc_state))
		goto bail;
	return;

bail:

	hfi1_early_err(&pdev->dev,
		       "Congestion Control Agent disabled for port %d\n", port);
}

/*
 * Do initialization for device that is only needed on
 * first detect, not on resets.
 */
static int loadtime_init(struct hfi1_devdata *dd)
{
	return 0;
}

/**
 * init_after_reset - re-initialize after a reset
 * @dd: the hfi1_ib device
 *
 * sanity check at least some of the values after reset, and
 * ensure no receive or transmit (explicitly, in case reset
 * failed)
 */
static int init_after_reset(struct hfi1_devdata *dd)
{
	int i;

	/*
	 * Ensure chip does no sends or receives, tail updates, or
	 * pioavail updates while we re-initialize.  This is mostly
	 * for the driver data structures, not chip registers.
	 */
	for (i = 0; i < dd->num_rcv_contexts; i++)
		hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
				 HFI1_RCVCTRL_INTRAVAIL_DIS |
				 HFI1_RCVCTRL_TAILUPD_DIS, i);
	pio_send_control(dd, PSC_GLOBAL_DISABLE);
	for (i = 0; i < dd->num_send_contexts; i++)
		sc_disable(dd->send_contexts[i].sc);

	return 0;
}

static void enable_chip(struct hfi1_devdata *dd)
{
	u32 rcvmask;
	u32 i;

	/* enable PIO send */
	pio_send_control(dd, PSC_GLOBAL_ENABLE);

	/*
	 * Enable kernel ctxts' receive and receive interrupt.
	 * Other ctxts done as user opens and initializes them.
	 */
	for (i = 0; i < dd->first_user_ctxt; ++i) {
		/* rebuild the mask per context so flags don't accumulate */
		rcvmask = HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB;
		rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
			HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
		if (!HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, MULTI_PKT_EGR))
			rcvmask |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
		if (HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, NODROP_RHQ_FULL))
			rcvmask |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
		if (HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, NODROP_EGR_FULL))
			rcvmask |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
		hfi1_rcvctrl(dd, rcvmask, i);
		sc_enable(dd->rcd[i]->sc);
	}
}

/**
 * create_workqueues - create per port workqueues
 * @dd: the hfi1_ib device
 */
static int create_workqueues(struct hfi1_devdata *dd)
{
	int pidx;
	struct hfi1_pportdata *ppd;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (!ppd->hfi1_wq) {
			char wq_name[8]; /* 3 + 2 + 1 + 1 + 1 */

			snprintf(wq_name, sizeof(wq_name), "hfi%d_%d",
				 dd->unit, pidx);
			ppd->hfi1_wq =
				create_singlethread_workqueue(wq_name);
			if (!ppd->hfi1_wq)
				goto wq_error;
		}
	}
	return 0;
wq_error:
	pr_err("create_singlethread_workqueue failed for port %d\n",
	       pidx + 1);
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->hfi1_wq) {
			destroy_workqueue(ppd->hfi1_wq);
			ppd->hfi1_wq = NULL;
		}
	}
	return -ENOMEM;
}

/**
 * hfi1_init - do the actual initialization sequence on the chip
 * @dd: the hfi1_ib device
 * @reinit: re-initializing, so don't allocate new memory
 *
 * Do the actual initialization sequence on the chip.  This is done
 * both from the init routine called from the PCI infrastructure, and
 * when we reset the chip, or detect that it was reset internally,
 * or it's administratively re-enabled.
 *
 * Memory allocation here and in called routines is only done in
 * the first case (reinit == 0).  We have to be careful, because even
 * without memory allocation, we need to re-write all the chip registers,
 * TIDs, etc. after the reset or enable has completed.
 */
int hfi1_init(struct hfi1_devdata *dd, int reinit)
{
	int ret = 0, pidx, lastfail = 0;
	unsigned i, len;
	struct hfi1_ctxtdata *rcd;
	struct hfi1_pportdata *ppd;

	/* Set up recv low level handlers */
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EXPECTED] =
						kdeth_process_expected;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EAGER] =
						kdeth_process_eager;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_IB] = process_receive_ib;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_ERROR] =
						process_receive_error;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_BYPASS] =
						process_receive_bypass;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID5] =
						process_receive_invalid;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID6] =
						process_receive_invalid;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID7] =
						process_receive_invalid;
	dd->rhf_rcv_function_map = dd->normal_rhf_rcv_functions;

	/* Set up send low level handlers */
	dd->process_pio_send = hfi1_verbs_send_pio;
	dd->process_dma_send = hfi1_verbs_send_dma;
	dd->pio_inline_send = pio_copy;

	if (is_a0(dd)) {
		atomic_set(&dd->drop_packet, DROP_PACKET_ON);
		dd->do_drop = 1;
	} else {
		atomic_set(&dd->drop_packet, DROP_PACKET_OFF);
		dd->do_drop = 0;
	}

	/* make sure the link is not "up" */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		ppd->linkup = 0;
	}

	if (reinit)
		ret = init_after_reset(dd);
	else
		ret = loadtime_init(dd);
	if (ret)
		goto done;

	/* dd->rcd can be NULL if early initialization failed */
	for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
		/*
		 * Set up the (kernel) rcvhdr queue and egr TIDs.  If doing
		 * re-init, the simplest way to handle this is to free
		 * existing, and re-allocate.
		 * Need to re-create rest of ctxt 0 ctxtdata as well.
		 */
		rcd = dd->rcd[i];
		if (!rcd)
			continue;

		rcd->do_interrupt = &handle_receive_interrupt;

		lastfail = hfi1_create_rcvhdrq(dd, rcd);
		if (!lastfail)
			lastfail = hfi1_setup_eagerbufs(rcd);
		if (lastfail)
			dd_dev_err(dd,
				   "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
	}
	if (lastfail)
		ret = lastfail;

	/* Allocate enough memory for user event notification. */
	len = ALIGN(dd->chip_rcv_contexts * HFI1_MAX_SHARED_CTXTS *
		    sizeof(*dd->events), PAGE_SIZE);
	dd->events = vmalloc_user(len);
	if (!dd->events)
		dd_dev_err(dd, "Failed to allocate user events page\n");
	/*
	 * Allocate a page for device and port status.
	 * Page will be shared amongst all user processes.
	 */
	dd->status = vmalloc_user(PAGE_SIZE);
	if (!dd->status)
		dd_dev_err(dd, "Failed to allocate dev status page\n");
	else
		dd->freezelen = PAGE_SIZE - (sizeof(*dd->status) -
					     sizeof(dd->status->freezemsg));
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (dd->status)
			/* Currently, we only have one port */
			ppd->statusp = &dd->status->port;

		set_mtu(ppd);
	}

	/* enable chip even if we have an error, so we can debug cause */
	enable_chip(dd);

	ret = hfi1_cq_init(dd);
done:
	/*
	 * Set status even if port serdes is not initialized
	 * so that diags will work.
	 */
	if (dd->status)
		dd->status->dev |= HFI1_STATUS_CHIP_PRESENT |
			HFI1_STATUS_INITTED;
	if (!ret) {
		/* enable all interrupts from the chip */
		set_intr_state(dd, 1);

		/* chip is OK for user apps; mark it as initialized */
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;

			/*
			 * initialize the qsfp if it exists
			 * Requires interrupts to be enabled so we are notified
			 * when the QSFP completes reset, and has
			 * to be done before bringing up the SERDES
			 */
			init_qsfp(ppd);

			/*
			 * start the serdes - must be after interrupts are
			 * enabled so we are notified when the link goes up
			 */
			lastfail = bringup_serdes(ppd);
			if (lastfail)
				dd_dev_info(dd,
					    "Failed to bring up port %u\n",
					    ppd->port);

			/*
			 * Set status even if port serdes is not initialized
			 * so that diags will work.
			 */
			if (ppd->statusp)
				*ppd->statusp |= HFI1_STATUS_CHIP_PRESENT |
					HFI1_STATUS_INITTED;
			if (!ppd->link_speed_enabled)
				continue;
		}
	}

	/* if ret is non-zero, we probably should do some cleanup here... */
	return ret;
}

static inline struct hfi1_devdata *__hfi1_lookup(int unit)
{
	return idr_find(&hfi1_unit_table, unit);
}

struct hfi1_devdata *hfi1_lookup(int unit)
{
	struct hfi1_devdata *dd;
	unsigned long flags;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	dd = __hfi1_lookup(unit);
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);

	return dd;
}

/*
 * Stop the timers during unit shutdown, or after an error late
 * in initialization.
 */
static void stop_timers(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	int pidx;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
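		/* a non-zero .data means the LED override timer was set up */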
		if (ppd->led_override_timer.data) {
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}
	}
}

/**
 * shutdown_device - shut down a device
 * @dd: the hfi1_ib device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.  It does not free any data structures.
 * Everything it does has to be setup again by hfi1_init(dd, 1)
 */
static void shutdown_device(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	unsigned pidx;
	int i;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		ppd->linkup = 0;
		if (ppd->statusp)
			*ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
					   HFI1_STATUS_IB_READY);
	}
	dd->flags &= ~HFI1_INITTED;

	/* mask interrupts, but not errors */
	set_intr_state(dd, 0);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		for (i = 0; i < dd->num_rcv_contexts; i++)
			hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS |
					 HFI1_RCVCTRL_CTXT_DIS |
					 HFI1_RCVCTRL_INTRAVAIL_DIS |
					 HFI1_RCVCTRL_PKEY_DIS |
					 HFI1_RCVCTRL_ONE_PKT_EGR_DIS, i);
		/*
		 * Gracefully stop all sends allowing any in progress to
		 * trickle out first.
		 */
		for (i = 0; i < dd->num_send_contexts; i++)
			sc_flush(dd->send_contexts[i].sc);
	}

	/*
	 * Enough for anything that's going to trickle out to have actually
	 * done so.
	 */
	udelay(20);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		/* disable all contexts */
		for (i = 0; i < dd->num_send_contexts; i++)
			sc_disable(dd->send_contexts[i].sc);
		/* disable the send device */
		pio_send_control(dd, PSC_GLOBAL_DISABLE);

		/*
		 * Clear SerdesEnable.
		 * We can't count on interrupts since we are stopping.
		 */
		hfi1_quiet_serdes(ppd);

		if (ppd->hfi1_wq) {
			destroy_workqueue(ppd->hfi1_wq);
			ppd->hfi1_wq = NULL;
		}
	}
	sdma_exit(dd);
}

/**
 * hfi1_free_ctxtdata - free a context's allocated data
 * @dd: the hfi1_ib device
 * @rcd: the ctxtdata structure
 *
 * free up any allocated data for a context
 * This should not touch anything that would affect a simultaneous
 * re-allocation of context data, because it is called after hfi1_mutex
 * is released (and can be called from reinit as well).
 * It should never change any chip state, or global driver state.
 */
void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	unsigned e;

	if (!rcd)
		return;

	if (rcd->rcvhdrq) {
		dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
				  rcd->rcvhdrq, rcd->rcvhdrq_phys);
		rcd->rcvhdrq = NULL;
		if (rcd->rcvhdrtail_kvaddr) {
			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
					  (void *)rcd->rcvhdrtail_kvaddr,
					  rcd->rcvhdrqtailaddr_phys);
			rcd->rcvhdrtail_kvaddr = NULL;
		}
	}

	/* all the RcvArray entries should have been cleared by now */
	kfree(rcd->egrbufs.rcvtids);

	for (e = 0; e < rcd->egrbufs.alloced; e++) {
		if (rcd->egrbufs.buffers[e].phys)
			dma_free_coherent(&dd->pcidev->dev,
					  rcd->egrbufs.buffers[e].len,
					  rcd->egrbufs.buffers[e].addr,
					  rcd->egrbufs.buffers[e].phys);
	}
	kfree(rcd->egrbufs.buffers);

	sc_free(rcd->sc);
	vfree(rcd->physshadow);
	vfree(rcd->tid_pg_list);
	vfree(rcd->user_event_mask);
	vfree(rcd->subctxt_uregbase);
	vfree(rcd->subctxt_rcvegrbuf);
	vfree(rcd->subctxt_rcvhdr_base);
	kfree(rcd->tidusemap);
	kfree(rcd->opstats);
	kfree(rcd);
}

void hfi1_free_devdata(struct hfi1_devdata *dd)
{
	unsigned long flags;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	idr_remove(&hfi1_unit_table, dd->unit);
	list_del(&dd->list);
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
	hfi1_dbg_ibdev_exit(&dd->verbs_dev);
	rcu_barrier(); /* wait for rcu callbacks to complete */
	free_percpu(dd->int_counter);
	free_percpu(dd->rcv_limit);
	ib_dealloc_device(&dd->verbs_dev.ibdev);
}

/*
 * Allocate our primary per-unit data structure.  Must be done via verbs
 * allocator, because the verbs cleanup process both does cleanup and
 * free of the data structure.
 * "extra" is for chip-specific data.
 *
 * Use the idr mechanism to get a unit number for this unit.
 */
struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
{
	unsigned long flags;
	struct hfi1_devdata *dd;
	int ret;

	dd = (struct hfi1_devdata *)ib_alloc_device(sizeof(*dd) + extra);
	if (!dd)
		return ERR_PTR(-ENOMEM);
	/* extra is * number of ports */
	dd->num_pports = extra / sizeof(struct hfi1_pportdata);
	dd->pport = (struct hfi1_pportdata *)(dd + 1);

	INIT_LIST_HEAD(&dd->list);
	dd->node = dev_to_node(&pdev->dev);
	if (dd->node < 0)
		dd->node = 0;
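	/* preload outside the lock so idr_alloc() can use GFP_NOWAIT below */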
	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&hfi1_devs_lock, flags);

	ret = idr_alloc(&hfi1_unit_table, dd, 0, 0, GFP_NOWAIT);
	if (ret >= 0) {
		dd->unit = ret;
		list_add(&dd->list, &hfi1_dev_list);
	}

	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
	idr_preload_end();

	if (ret < 0) {
		hfi1_early_err(&pdev->dev,
			       "Could not allocate unit ID: error %d\n", -ret);
		goto bail;
	}
	/*
	 * Initialize all locks for the device. This needs to be as early as
	 * possible so locks are usable.
	 */
	spin_lock_init(&dd->sc_lock);
	spin_lock_init(&dd->sendctrl_lock);
	spin_lock_init(&dd->rcvctrl_lock);
	spin_lock_init(&dd->uctxt_lock);
	spin_lock_init(&dd->hfi1_diag_trans_lock);
	spin_lock_init(&dd->sc_init_lock);
	spin_lock_init(&dd->dc8051_lock);
	spin_lock_init(&dd->dc8051_memlock);
	mutex_init(&dd->qsfp_i2c_mutex);
	seqlock_init(&dd->sc2vl_lock);
	spin_lock_init(&dd->sde_map_lock);
	init_waitqueue_head(&dd->event_queue);

	dd->int_counter = alloc_percpu(u64);
	if (!dd->int_counter) {
		ret = -ENOMEM;
		hfi1_early_err(&pdev->dev,
			       "Could not allocate per-cpu int_counter\n");
		goto bail;
	}

	dd->rcv_limit = alloc_percpu(u64);
	if (!dd->rcv_limit) {
		ret = -ENOMEM;
		hfi1_early_err(&pdev->dev,
			       "Could not allocate per-cpu rcv_limit\n");
		goto bail;
	}

	if (!hfi1_cpulist_count) {
		u32 count = num_online_cpus();

		hfi1_cpulist = kcalloc(BITS_TO_LONGS(count), sizeof(long),
				       GFP_KERNEL);
		if (hfi1_cpulist)
			hfi1_cpulist_count = count;
		else
			hfi1_early_err(
				&pdev->dev,
				"Could not alloc cpulist info, cpu affinity might be wrong\n");
	}
	hfi1_dbg_ibdev_init(&dd->verbs_dev);
	return dd;

bail:
	if (!list_empty(&dd->list))
		list_del_init(&dd->list);
	ib_dealloc_device(&dd->verbs_dev.ibdev);
	return ERR_PTR(ret);
}

/*
 * Called from freeze mode handlers, and from PCI error
 * reporting code.  Should be paranoid about state of
 * system and data structures.
 */
void hfi1_disable_after_error(struct hfi1_devdata *dd)
{
	if (dd->flags & HFI1_INITTED) {
		u32 pidx;

		dd->flags &= ~HFI1_INITTED;
		if (dd->pport)
			for (pidx = 0; pidx < dd->num_pports; ++pidx) {
				struct hfi1_pportdata *ppd;

				ppd = dd->pport + pidx;
				if (dd->flags & HFI1_PRESENT)
					set_link_state(ppd, HLS_DN_DISABLE);

				if (ppd->statusp)
					*ppd->statusp &= ~HFI1_STATUS_IB_READY;
			}
	}

	/*
	 * Mark as having had an error for driver, and also
	 * for /sys and status word mapped to user programs.
	 * This marks unit as not usable, until reset.
	 */
	if (dd->status)
		dd->status->dev |= HFI1_STATUS_HWERROR;
}

static void remove_one(struct pci_dev *);
static int init_one(struct pci_dev *, const struct pci_device_id *);

#define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: "
#define PFX DRIVER_NAME ": "

static const struct pci_device_id hfi1_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL0) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL1) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, hfi1_pci_tbl);

static struct pci_driver hfi1_pci_driver = {
	.name = DRIVER_NAME,
	.probe = init_one,
	.remove = remove_one,
	.id_table = hfi1_pci_tbl,
	.err_handler = &hfi1_pci_err_handler,
};

static void __init compute_krcvqs(void)
{
	int i;

	for (i = 0; i < krcvqsset; i++)
		n_krcvqs += krcvqs[i];
}

/*
 * Do all the generic driver unit- and chip-independent memory
 * allocation and initialization.
 */
static int __init hfi1_mod_init(void)
{
	int ret;

	ret = dev_init();
	if (ret)
		goto bail;

	/* validate max MTU before any devices start */
	if (!valid_opa_max_mtu(hfi1_max_mtu)) {
		pr_err("Invalid max_mtu 0x%x, using 0x%x instead\n",
		       hfi1_max_mtu, HFI1_DEFAULT_MAX_MTU);
		hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
	}
	/* valid CUs run from 1-128 in powers of 2 */
	if (hfi1_cu > 128 || !is_power_of_2(hfi1_cu))
		hfi1_cu = 1;
	/* valid credit return threshold is 0-100, variable is unsigned */
	if (user_credit_return_threshold > 100)
		user_credit_return_threshold = 100;

	compute_krcvqs();
	/*
	 * sanitize receive interrupt count; the timeout must wait until
	 * after the hardware type is known
	 */
	if (rcv_intr_count > RCV_HDR_HEAD_COUNTER_MASK)
		rcv_intr_count = RCV_HDR_HEAD_COUNTER_MASK;
	/* reject invalid combinations */
	if (rcv_intr_count == 0 && rcv_intr_timeout == 0) {
		pr_err("Invalid mode: both receive interrupt count and available timeout are zero - setting interrupt count to 1\n");
		rcv_intr_count = 1;
	}
	if (rcv_intr_count > 1 && rcv_intr_timeout == 0) {
		/*
		 * Avoid indefinite packet delivery by requiring a timeout
		 * if count is > 1.
		 */
		pr_err("Invalid mode: receive interrupt count greater than 1 and available timeout is zero - setting available timeout to 1\n");
		rcv_intr_timeout = 1;
	}
	if (rcv_intr_dynamic && !(rcv_intr_count > 1 && rcv_intr_timeout > 0)) {
		/*
		 * The dynamic algorithm expects a non-zero timeout
		 * and a count > 1.
		 */
		pr_err("Invalid mode: dynamic receive interrupt mitigation with invalid count and timeout - turning dynamic off\n");
		rcv_intr_dynamic = 0;
	}

	/* sanitize link CRC options */
	link_crc_mask &= SUPPORTED_CRCS;

	/*
	 * These must be called before the driver is registered with
	 * the PCI subsystem.
	 */
	idr_init(&hfi1_unit_table);

	hfi1_dbg_init();
	ret = pci_register_driver(&hfi1_pci_driver);
	if (ret < 0) {
		pr_err("Unable to register driver: error %d\n", -ret);
		goto bail_dev;
	}
	goto bail; /* all OK */

bail_dev:
	hfi1_dbg_exit();
	idr_destroy(&hfi1_unit_table);
	dev_cleanup();
bail:
	return ret;
}

module_init(hfi1_mod_init);

/*
 * Do the non-unit driver cleanup, memory free, etc. at unload.
 */
static void __exit hfi1_mod_cleanup(void)
{
	pci_unregister_driver(&hfi1_pci_driver);
	hfi1_dbg_exit();
	hfi1_cpulist_count = 0;
	kfree(hfi1_cpulist);

	idr_destroy(&hfi1_unit_table);
	dispose_firmware();	/* asymmetric with obtain_firmware() */
	dev_cleanup();
}

module_exit(hfi1_mod_cleanup);

/* this can only be called after a successful initialization */
static void cleanup_device_data(struct hfi1_devdata *dd)
{
	int ctxt;
	int pidx;
	struct hfi1_ctxtdata **tmp;
	unsigned long flags;

	/* users can't do anything more with chip */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		struct hfi1_pportdata *ppd = &dd->pport[pidx];
		struct cc_state *cc_state;
		int i;

		if (ppd->statusp)
			*ppd->statusp &= ~HFI1_STATUS_CHIP_PRESENT;

		for (i = 0; i < OPA_MAX_SLS; i++)
			hrtimer_cancel(&ppd->cca_timer[i].hrtimer);

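		/*
		 * Detach cc_state under the lock, then free it only after an
		 * RCU grace period so concurrent readers stay safe.
		 */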
		spin_lock(&ppd->cc_state_lock);
		cc_state = get_cc_state(ppd);
		rcu_assign_pointer(ppd->cc_state, NULL);
		spin_unlock(&ppd->cc_state_lock);

		if (cc_state)
			call_rcu(&cc_state->rcu, cc_state_reclaim);
	}

	free_credit_return(dd);

	/*
	 * Free any resources still in use (usually just kernel contexts)
	 * at unload; we do for ctxtcnt, because that's what we allocate.
	 * We acquire lock to be really paranoid that rcd isn't being
	 * accessed from some interrupt-related code (that should not happen,
	 * but best to be sure).
	 */
	spin_lock_irqsave(&dd->uctxt_lock, flags);
	tmp = dd->rcd;
	dd->rcd = NULL;
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);
	for (ctxt = 0; tmp && ctxt < dd->num_rcv_contexts; ctxt++) {
		struct hfi1_ctxtdata *rcd = tmp[ctxt];

		tmp[ctxt] = NULL; /* debugging paranoia */
		if (rcd) {
			hfi1_clear_tids(rcd);
			hfi1_free_ctxtdata(dd, rcd);
		}
	}
	kfree(tmp);
	/* must follow rcv context free - need to remove rcv's hooks */
	for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++)
		sc_free(dd->send_contexts[ctxt].sc);
	dd->num_send_contexts = 0;
	kfree(dd->send_contexts);
	dd->send_contexts = NULL;
	kfree(dd->boardname);
	vfree(dd->events);
	vfree(dd->status);
	hfi1_cq_exit(dd);
}

/*
 * Clean up on unit shutdown, or error during unit load after
 * successful initialization.
 */
static void postinit_cleanup(struct hfi1_devdata *dd)
{
	hfi1_start_cleanup(dd);

	hfi1_pcie_ddcleanup(dd);
	hfi1_pcie_cleanup(dd->pcidev);

	cleanup_device_data(dd);

	hfi1_free_devdata(dd);
}

static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret = 0, j, pidx, initfail;
	struct hfi1_devdata *dd = NULL;

	/* First, lock the non-writable module parameters */
	HFI1_CAP_LOCK();

	/* Validate some global module parameters */
	if (rcvhdrcnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
		hfi1_early_err(&pdev->dev, "Header queue count too small\n");
		ret = -EINVAL;
		goto bail;
	}
	/* use the encoding function as a sanitization check */
	if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
		hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n",
			       hfi1_hdrq_entsize);
		ret = -EINVAL;
		goto bail;
	}

	/* The receive eager buffer size must be set before the receive
	 * contexts are created.
	 *
	 * Set the eager buffer size.  Validate that it falls in a range
	 * allowed by the hardware - all powers of 2 between the min and
	 * max.  The maximum valid MTU is within the eager buffer range
	 * so we do not need to cap the max_mtu by an eager buffer size
	 * setting.
	 */
	if (eager_buffer_size) {
		if (!is_power_of_2(eager_buffer_size))
			eager_buffer_size =
				roundup_pow_of_two(eager_buffer_size);
		eager_buffer_size =
			clamp_val(eager_buffer_size,
				  MIN_EAGER_BUFFER * 8,
				  MAX_EAGER_BUFFER_TOTAL);
		hfi1_early_info(&pdev->dev, "Eager buffer size %u\n",
				eager_buffer_size);
	} else {
		hfi1_early_err(&pdev->dev, "Invalid Eager buffer size of 0\n");
		ret = -EINVAL;
		goto bail;
	}

	/* restrict value of hfi1_rcvarr_split */
	hfi1_rcvarr_split = clamp_val(hfi1_rcvarr_split, 0, 100);

	ret = hfi1_pcie_init(pdev, ent);
	if (ret)
		goto bail;

	/*
	 * Do device-specific initialization, function table setup, dd
	 * allocation, etc.
	 */
	switch (ent->device) {
	case PCI_DEVICE_ID_INTEL0:
	case PCI_DEVICE_ID_INTEL1:
		dd = hfi1_init_dd(pdev, ent);
		break;
	default:
		hfi1_early_err(&pdev->dev,
			       "Failing on unknown Intel deviceid 0x%x\n",
			       ent->device);
		ret = -ENODEV;
	}

	if (IS_ERR(dd))
		ret = PTR_ERR(dd);
	if (ret)
		goto clean_bail; /* error already printed */

	ret = create_workqueues(dd);
	if (ret)
		goto clean_bail;

	/* do the generic initialization */
	initfail = hfi1_init(dd, 0);

	ret = hfi1_register_ib_device(dd);

	/*
	 * Now ready for use.  This should be cleared whenever we
	 * detect a reset, or initiate one.  If earlier failure,
	 * we still create devices, so diags, etc. can be used
	 * to determine cause of problem.
	 */
	if (!initfail && !ret)
		dd->flags |= HFI1_INITTED;

	j = hfi1_device_create(dd);
	if (j)
		dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j);

	if (initfail || ret) {
		stop_timers(dd);
		flush_workqueue(ib_wq);
		for (pidx = 0; pidx < dd->num_pports; ++pidx)
			hfi1_quiet_serdes(dd->pport + pidx);
		if (!j)
			hfi1_device_remove(dd);
		if (!ret)
			hfi1_unregister_ib_device(dd);
		postinit_cleanup(dd);
		if (initfail)
			ret = initfail;
		goto bail;	/* everything already cleaned */
	}

	sdma_start(dd);

	return 0;

clean_bail:
	hfi1_pcie_cleanup(pdev);
bail:
	return ret;
}

static void remove_one(struct pci_dev *pdev)
{
	struct hfi1_devdata *dd = pci_get_drvdata(pdev);

	/* unregister from IB core */
	hfi1_unregister_ib_device(dd);

	/*
	 * Disable the IB link, disable interrupts on the device,
	 * clear dma engines, etc.
	 */
	shutdown_device(dd);

	stop_timers(dd);

	/* wait until all of our (qsfp) queue_work() calls complete */
	flush_workqueue(ib_wq);

	hfi1_device_remove(dd);

	postinit_cleanup(dd);
}

/**
 * hfi1_create_rcvhdrq - create a receive header queue
 * @dd: the hfi1_ib device
 * @rcd: the context data
 *
 * This must be contiguous memory (from an i/o perspective), and must be
 * DMA'able (which means for some systems, it will go through an IOMMU,
 * or be forced into a low address range).
 */
int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	unsigned amt;
	u64 reg;

	if (!rcd->rcvhdrq) {
		dma_addr_t phys_hdrqtail;
		gfp_t gfp_flags;

		/*
		 * rcvhdrqentsize is in DWs, so we have to convert to bytes
		 * (* sizeof(u32)).
		 */
		amt = ALIGN(rcd->rcvhdrq_cnt * rcd->rcvhdrqentsize *
			    sizeof(u32), PAGE_SIZE);

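		/* user contexts get GFP_USER so the allocation is treated as
		 * user memory; kernel contexts use plain GFP_KERNEL */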
		gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ?
			GFP_USER : GFP_KERNEL;
		rcd->rcvhdrq = dma_zalloc_coherent(
			&dd->pcidev->dev, amt, &rcd->rcvhdrq_phys,
			gfp_flags | __GFP_COMP);

		if (!rcd->rcvhdrq) {
			dd_dev_err(dd,
				   "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n",
				   amt, rcd->ctxt);
			goto bail;
		}

		/* Event mask is per device now and is in hfi1_devdata */
		/*if (rcd->ctxt >= dd->first_user_ctxt) {
			rcd->user_event_mask = vmalloc_user(PAGE_SIZE);
			if (!rcd->user_event_mask)
				goto bail_free_hdrq;
		}*/

		if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
			rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent(
				&dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
				gfp_flags);
			if (!rcd->rcvhdrtail_kvaddr)
				goto bail_free;
			rcd->rcvhdrqtailaddr_phys = phys_hdrqtail;
		}

		rcd->rcvhdrq_size = amt;
	}
	/*
	 * These values are per-context:
	 *	RcvHdrCnt
	 *	RcvHdrEntSize
	 *	RcvHdrSize
	 */
	reg = ((u64)(rcd->rcvhdrq_cnt >> HDRQ_SIZE_SHIFT)
			& RCV_HDR_CNT_CNT_MASK)
		<< RCV_HDR_CNT_CNT_SHIFT;
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_CNT, reg);
	reg = (encode_rcv_header_entry_size(rcd->rcvhdrqentsize)
			& RCV_HDR_ENT_SIZE_ENT_SIZE_MASK)
		<< RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT;
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_ENT_SIZE, reg);
	reg = (dd->rcvhdrsize & RCV_HDR_SIZE_HDR_SIZE_MASK)
		<< RCV_HDR_SIZE_HDR_SIZE_SHIFT;
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_SIZE, reg);
	return 0;

bail_free:
	dd_dev_err(dd,
		   "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
		   rcd->ctxt);
	vfree(rcd->user_event_mask);
	rcd->user_event_mask = NULL;
	dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
			  rcd->rcvhdrq_phys);
	rcd->rcvhdrq = NULL;
bail:
	return -ENOMEM;
}

/**
 * hfi1_setup_eagerbufs - allocate eager buffers, both kernel and user contexts.
 * @rcd: the context we are setting up.
 *
 * Allocate the eager TID buffers and program them into the chip.
 * They are no longer completely contiguous, we do multiple allocation
 * calls.  Otherwise we get the OOM code involved, by asking for too
 * much per call, with disastrous results on some kernels.
 */
int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 max_entries, egrtop, alloced_bytes = 0, idx = 0;
	gfp_t gfp_flags;
	u16 order;
	int ret = 0;
	u16 round_mtu = roundup_pow_of_two(hfi1_max_mtu);

	/*
	 * GFP_USER, but without GFP_FS, so buffer cache can be
	 * coalesced (we hope); otherwise, even at order 4,
	 * heavy filesystem activity makes these fail, and we can
	 * use compound pages.
	 */
	gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;

	/*
	 * The minimum size of the eager buffers is a group of MTU-sized
	 * buffers.
	 * The global eager_buffer_size parameter is checked against the
	 * theoretical lower limit of the value.  Here, we check against the
	 * MTU.
	 */
	if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size))
		rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size;
	/*
	 * If using one-pkt-per-egr-buffer, lower the eager buffer
	 * size to the max MTU (page-aligned).
	 */
	if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
		rcd->egrbufs.rcvtid_size = round_mtu;

	/*
	 * Eager buffers sizes of 1MB or less require smaller TID sizes
	 * to satisfy the "multiple of 8 RcvArray entries" requirement.
	 */
	if (rcd->egrbufs.size <= (1 << 20))
		rcd->egrbufs.rcvtid_size = max((unsigned long)round_mtu,
			rounddown_pow_of_two(rcd->egrbufs.size / 8));

	while (alloced_bytes < rcd->egrbufs.size &&
	       rcd->egrbufs.alloced < rcd->egrbufs.count) {
		rcd->egrbufs.buffers[idx].addr =
			dma_zalloc_coherent(&dd->pcidev->dev,
					    rcd->egrbufs.rcvtid_size,
					    &rcd->egrbufs.buffers[idx].phys,
					    gfp_flags);
		if (rcd->egrbufs.buffers[idx].addr) {
			rcd->egrbufs.buffers[idx].len =
				rcd->egrbufs.rcvtid_size;
			rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].addr =
				rcd->egrbufs.buffers[idx].addr;
			rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].phys =
				rcd->egrbufs.buffers[idx].phys;
			rcd->egrbufs.alloced++;
			alloced_bytes += rcd->egrbufs.rcvtid_size;
			idx++;
		} else {
			u32 new_size, i, j;
			u64 offset = 0;

			/*
			 * Fail the eager buffer allocation if:
			 *   - we are already using the lowest acceptable size
			 *   - we are using one-pkt-per-egr-buffer (this implies
			 *     that we are accepting only one size)
			 */
			if (rcd->egrbufs.rcvtid_size == round_mtu ||
			    !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) {
				dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n",
					   rcd->ctxt);
				goto bail_rcvegrbuf_phys;
			}

			new_size = rcd->egrbufs.rcvtid_size / 2;

			/*
			 * If the first attempt to allocate memory failed, don't
			 * fail everything but continue with the next lower
			 * size.
			 */
			if (idx == 0) {
				rcd->egrbufs.rcvtid_size = new_size;
				continue;
			}

			/*
			 * Re-partition already allocated buffers to a smaller
			 * size.
			 */
			rcd->egrbufs.alloced = 0;
			for (i = 0, j = 0, offset = 0; j < idx; i++) {
				if (i >= rcd->egrbufs.count)
					break;
				rcd->egrbufs.rcvtids[i].phys =
					rcd->egrbufs.buffers[j].phys + offset;
				rcd->egrbufs.rcvtids[i].addr =
					rcd->egrbufs.buffers[j].addr + offset;
				rcd->egrbufs.alloced++;
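				/*
				 * advance to the next backing buffer once the
				 * current one has been fully carved into
				 * new_size chunks; otherwise step within it
				 */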
				if ((rcd->egrbufs.buffers[j].phys + offset +
				     new_size) ==
				    (rcd->egrbufs.buffers[j].phys +
				     rcd->egrbufs.buffers[j].len)) {
					j++;
					offset = 0;
				} else
					offset += new_size;
			}
			rcd->egrbufs.rcvtid_size = new_size;
		}
	}
	rcd->egrbufs.numbufs = idx;
	rcd->egrbufs.size = alloced_bytes;

	hfi1_cdbg(PROC,
		  "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %zuKB\n",
		  rcd->ctxt, rcd->egrbufs.alloced, rcd->egrbufs.rcvtid_size,
		  rcd->egrbufs.size);

	/*
	 * Set the contexts rcv array head update threshold to the closest
	 * power of 2 (so we can use a mask instead of modulo) below half
	 * the allocated entries.
	 */
	rcd->egrbufs.threshold =
		rounddown_pow_of_two(rcd->egrbufs.alloced / 2);
	/*
	 * Compute the expected RcvArray entry base.  This is done after
	 * allocating the eager buffers in order to maximize the
	 * expected RcvArray entries for the context.
	 */
	max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size;
	egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size);
	rcd->expected_count = max_entries - egrtop;
	if (rcd->expected_count > MAX_TID_PAIR_ENTRIES * 2)
		rcd->expected_count = MAX_TID_PAIR_ENTRIES * 2;

	rcd->expected_base = rcd->eager_base + egrtop;
	hfi1_cdbg(PROC, "ctxt%u: eager:%u, exp:%u, egrbase:%u, expbase:%u\n",
		  rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count,
		  rcd->eager_base, rcd->expected_base);

	if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) {
		hfi1_cdbg(PROC,
			  "ctxt%u: current Eager buffer size is invalid %u\n",
			  rcd->ctxt, rcd->egrbufs.rcvtid_size);
		ret = -EINVAL;
		goto bail;
	}

	for (idx = 0; idx < rcd->egrbufs.alloced; idx++) {
		hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER,
			     rcd->egrbufs.rcvtids[idx].phys, order);
		cond_resched();
	}
	goto bail;

bail_rcvegrbuf_phys:
	for (idx = 0; idx < rcd->egrbufs.alloced &&
	     rcd->egrbufs.buffers[idx].addr;
	     idx++) {
		dma_free_coherent(&dd->pcidev->dev,
				  rcd->egrbufs.buffers[idx].len,
				  rcd->egrbufs.buffers[idx].addr,
				  rcd->egrbufs.buffers[idx].phys);
		rcd->egrbufs.buffers[idx].addr = NULL;
		rcd->egrbufs.buffers[idx].phys = 0;
		rcd->egrbufs.buffers[idx].len = 0;
	}
bail:
	return ret;
}