Commit | Line | Data |
---|---|---|
77241056 MM |
1 | /* |
2 | * | |
3 | * This file is provided under a dual BSD/GPLv2 license. When using or | |
4 | * redistributing this file, you may do so under either license. | |
5 | * | |
6 | * GPL LICENSE SUMMARY | |
7 | * | |
91ab4ed3 | 8 | * Copyright(c) 2015, 2016 Intel Corporation. |
77241056 MM |
9 | * |
10 | * This program is free software; you can redistribute it and/or modify | |
11 | * it under the terms of version 2 of the GNU General Public License as | |
12 | * published by the Free Software Foundation. | |
13 | * | |
14 | * This program is distributed in the hope that it will be useful, but | |
15 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
17 | * General Public License for more details. | |
18 | * | |
19 | * BSD LICENSE | |
20 | * | |
91ab4ed3 | 21 | * Copyright(c) 2015, 2016 Intel Corporation. |
77241056 MM |
22 | * |
23 | * Redistribution and use in source and binary forms, with or without | |
24 | * modification, are permitted provided that the following conditions | |
25 | * are met: | |
26 | * | |
27 | * - Redistributions of source code must retain the above copyright | |
28 | * notice, this list of conditions and the following disclaimer. | |
29 | * - Redistributions in binary form must reproduce the above copyright | |
30 | * notice, this list of conditions and the following disclaimer in | |
31 | * the documentation and/or other materials provided with the | |
32 | * distribution. | |
33 | * - Neither the name of Intel Corporation nor the names of its | |
34 | * contributors may be used to endorse or promote products derived | |
35 | * from this software without specific prior written permission. | |
36 | * | |
37 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
38 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
39 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
40 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
41 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
42 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
43 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
44 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
45 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
46 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
47 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
48 | * | |
49 | */ | |
50 | ||
51 | #include <linux/pci.h> | |
52 | #include <linux/netdevice.h> | |
53 | #include <linux/vmalloc.h> | |
54 | #include <linux/delay.h> | |
55 | #include <linux/idr.h> | |
56 | #include <linux/module.h> | |
57 | #include <linux/printk.h> | |
58 | #include <linux/hrtimer.h> | |
ec3f2c12 | 59 | #include <rdma/rdma_vt.h> |
77241056 MM |
60 | |
61 | #include "hfi.h" | |
62 | #include "device.h" | |
63 | #include "common.h" | |
6c63e423 | 64 | #include "trace.h" |
77241056 MM |
65 | #include "mad.h" |
66 | #include "sdma.h" | |
67 | #include "debugfs.h" | |
68 | #include "verbs.h" | |
affa48de | 69 | #include "aspm.h" |
77241056 MM |
70 | |
71 | #undef pr_fmt | |
72 | #define pr_fmt(fmt) DRIVER_NAME ": " fmt | |
73 | ||
74 | /* | |
75 | * min buffers we want to have per context, after those the driver uses | |
76 | */ | |
77 | #define HFI1_MIN_USER_CTXT_BUFCNT 7 | |
78 | ||
79 | #define HFI1_MIN_HDRQ_EGRBUF_CNT 2 | |
e002dcc0 | 80 | #define HFI1_MAX_HDRQ_EGRBUF_CNT 16352 |
77241056 MM |
81 | #define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */ |
82 | #define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */ | |
83 | ||
84 | /* | |
85 | * Number of user receive contexts we are configured to use (to allow for more | |
86 | * pio buffers per ctxt, etc.) Zero means use one user context per CPU. | |
87 | */ | |
2ce6bf22 SS |
88 | int num_user_contexts = -1; |
89 | module_param_named(num_user_contexts, num_user_contexts, int, S_IRUGO); | |
77241056 | 90 | MODULE_PARM_DESC( |
2ce6bf22 | 91 | num_user_contexts, "Set max number of user contexts to use"); |
77241056 | 92 | |
5b55ea3b | 93 | uint krcvqs[RXE_NUM_DATA_VL]; |
77241056 | 94 | int krcvqsset; |
5b55ea3b | 95 | module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO); |
82c2611d | 96 | MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL"); |
77241056 MM |
97 | |
98 | /* computed based on above array */ | |
99 | unsigned n_krcvqs; | |
100 | ||
101 | static unsigned hfi1_rcvarr_split = 25; | |
102 | module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO); | |
103 | MODULE_PARM_DESC(rcvarr_split, "Percent of context's RcvArray entries used for Eager buffers"); | |
104 | ||
105 | static uint eager_buffer_size = (2 << 20); /* 2MB */ | |
106 | module_param(eager_buffer_size, uint, S_IRUGO); | |
107 | MODULE_PARM_DESC(eager_buffer_size, "Size of the eager buffers, default: 2MB"); | |
108 | ||
109 | static uint rcvhdrcnt = 2048; /* 2x the max eager buffer count */ | |
110 | module_param_named(rcvhdrcnt, rcvhdrcnt, uint, S_IRUGO); | |
111 | MODULE_PARM_DESC(rcvhdrcnt, "Receive header queue count (default 2048)"); | |
112 | ||
113 | static uint hfi1_hdrq_entsize = 32; | |
114 | module_param_named(hdrq_entsize, hfi1_hdrq_entsize, uint, S_IRUGO); | |
115 | MODULE_PARM_DESC(hdrq_entsize, "Size of header queue entries: 2 - 8B, 16 - 64B (default), 32 - 128B"); | |
116 | ||
117 | unsigned int user_credit_return_threshold = 33; /* default is 33% */ | |
118 | module_param(user_credit_return_threshold, uint, S_IRUGO); | |
ecb95a02 | 119 | MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits pass this many blocks (in percent of allocated blocks, 0 is off)"); |
77241056 MM |
120 | |
121 | static inline u64 encode_rcv_header_entry_size(u16); | |
122 | ||
123 | static struct idr hfi1_unit_table; | |
124 | u32 hfi1_cpulist_count; | |
125 | unsigned long *hfi1_cpulist; | |
126 | ||
127 | /* | |
128 | * Common code for creating the receive context array. | |
129 | */ | |
130 | int hfi1_create_ctxts(struct hfi1_devdata *dd) | |
131 | { | |
132 | unsigned i; | |
133 | int ret; | |
77241056 | 134 | |
82c2611d NV |
135 | /* Control context always has to be 0 */ | |
136 | BUILD_BUG_ON(HFI1_CTRL_CTXT != 0); | |
137 | ||
377f111e MH |
138 | dd->rcd = kzalloc_node(dd->num_rcv_contexts * sizeof(*dd->rcd), |
139 | GFP_KERNEL, dd->node); | |
806e6e1b | 140 | if (!dd->rcd) |
77241056 | 141 | goto nomem; |
77241056 MM |
142 | |
143 | /* create one or more kernel contexts */ | |
144 | for (i = 0; i < dd->first_user_ctxt; ++i) { | |
145 | struct hfi1_pportdata *ppd; | |
146 | struct hfi1_ctxtdata *rcd; | |
147 | ||
148 | ppd = dd->pport + (i % dd->num_pports); | |
957558c9 | 149 | rcd = hfi1_create_ctxtdata(ppd, i, dd->node); |
77241056 MM |
150 | if (!rcd) { |
151 | dd_dev_err(dd, | |
152 | "Unable to allocate kernel receive context, failing\n"); | |
153 | goto nomem; | |
154 | } | |
155 | /* | |
156 | * Set up the kernel context flags here and now because they | |
157 | * use default values for all receive side memories. User | |
158 | * contexts will be handled as they are created. | |
159 | */ | |
160 | rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) | | |
161 | HFI1_CAP_KGET(NODROP_RHQ_FULL) | | |
162 | HFI1_CAP_KGET(NODROP_EGR_FULL) | | |
163 | HFI1_CAP_KGET(DMA_RTAIL); | |
82c2611d NV |
164 | |
165 | /* Control context must use DMA_RTAIL */ | |
166 | if (rcd->ctxt == HFI1_CTRL_CTXT) | |
167 | rcd->flags |= HFI1_CAP_DMA_RTAIL; | |
77241056 MM |
168 | rcd->seq_cnt = 1; |
169 | ||
170 | rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node); | |
171 | if (!rcd->sc) { | |
172 | dd_dev_err(dd, | |
173 | "Unable to allocate kernel send context, failing\n"); | |
174 | dd->rcd[rcd->ctxt] = NULL; | |
175 | hfi1_free_ctxtdata(dd, rcd); | |
176 | goto nomem; | |
177 | } | |
178 | ||
179 | ret = hfi1_init_ctxt(rcd->sc); | |
180 | if (ret < 0) { | |
181 | dd_dev_err(dd, | |
182 | "Failed to setup kernel receive context, failing\n"); | |
183 | sc_free(rcd->sc); | |
184 | dd->rcd[rcd->ctxt] = NULL; | |
185 | hfi1_free_ctxtdata(dd, rcd); | |
186 | ret = -EFAULT; | |
187 | goto bail; | |
188 | } | |
189 | } | |
190 | ||
affa48de AD |
191 | /* |
192 | * Initialize aspm, to be done after gen3 transition and setting up | |
193 | * contexts and before enabling interrupts | |
194 | */ | |
195 | aspm_init(dd); | |
196 | ||
77241056 MM |
197 | return 0; |
198 | nomem: | |
199 | ret = -ENOMEM; | |
200 | bail: | |
201 | kfree(dd->rcd); | |
202 | dd->rcd = NULL; | |
203 | return ret; | |
204 | } | |
205 | ||
206 | /* | |
207 | * Common code for user and kernel context setup. | |
208 | */ | |
957558c9 MH |
209 | struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt, |
210 | int numa) | |
77241056 MM |
211 | { |
212 | struct hfi1_devdata *dd = ppd->dd; | |
213 | struct hfi1_ctxtdata *rcd; | |
214 | unsigned kctxt_ngroups = 0; | |
215 | u32 base; | |
216 | ||
217 | if (dd->rcv_entries.nctxt_extra > | |
218 | dd->num_rcv_contexts - dd->first_user_ctxt) | |
219 | kctxt_ngroups = (dd->rcv_entries.nctxt_extra - | |
220 | (dd->num_rcv_contexts - dd->first_user_ctxt)); | |
221 | rcd = kzalloc(sizeof(*rcd), GFP_KERNEL); | |
222 | if (rcd) { | |
223 | u32 rcvtids, max_entries; | |
224 | ||
6c63e423 | 225 | hfi1_cdbg(PROC, "setting up context %u\n", ctxt); |
77241056 MM |
226 | |
227 | INIT_LIST_HEAD(&rcd->qp_wait_list); | |
228 | rcd->ppd = ppd; | |
229 | rcd->dd = dd; | |
230 | rcd->cnt = 1; | |
231 | rcd->ctxt = ctxt; | |
232 | dd->rcd[ctxt] = rcd; | |
957558c9 | 233 | rcd->numa_id = numa; |
77241056 MM |
234 | rcd->rcv_array_groups = dd->rcv_entries.ngroups; |
235 | ||
463e6ebc | 236 | mutex_init(&rcd->exp_lock); |
77241056 MM |
237 | |
238 | /* | |
239 | * Calculate the context's RcvArray entry starting point. | |
240 | * We do this here because we have to take into account all | |
241 | * the RcvArray entries that previous contexts would have | |
242 | * taken and we have to account for any extra groups | |
243 | * assigned to the kernel or user contexts. | |
244 | */ | |
245 | if (ctxt < dd->first_user_ctxt) { | |
246 | if (ctxt < kctxt_ngroups) { | |
247 | base = ctxt * (dd->rcv_entries.ngroups + 1); | |
248 | rcd->rcv_array_groups++; | |
249 | } else | |
250 | base = kctxt_ngroups + | |
251 | (ctxt * dd->rcv_entries.ngroups); | |
252 | } else { | |
253 | u16 ct = ctxt - dd->first_user_ctxt; | |
254 | ||
255 | base = ((dd->n_krcv_queues * dd->rcv_entries.ngroups) + | |
256 | kctxt_ngroups); | |
257 | if (ct < dd->rcv_entries.nctxt_extra) { | |
258 | base += ct * (dd->rcv_entries.ngroups + 1); | |
259 | rcd->rcv_array_groups++; | |
260 | } else | |
261 | base += dd->rcv_entries.nctxt_extra + | |
262 | (ct * dd->rcv_entries.ngroups); | |
263 | } | |
264 | rcd->eager_base = base * dd->rcv_entries.group_size; | |
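/*
 * Illustrative base calculation with hypothetical values: with
 * kctxt_ngroups = 0, ngroups = 64 and group_size = 8, kernel
 * context 3 gets base = 3 * 64 = 192 groups, so its eager_base
 * starts 192 * 8 = 1536 RcvArray entries into the array.
 */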
265 | ||
266 | /* Validate and initialize Rcv Hdr Q variables */ | |
267 | if (rcvhdrcnt % HDRQ_INCREMENT) { | |
268 | dd_dev_err(dd, | |
349ac71f | 269 | "ctxt%u: header queue count %u must be divisible by %lu\n",
77241056 MM |
270 | rcd->ctxt, rcvhdrcnt, HDRQ_INCREMENT); |
271 | goto bail; | |
272 | } | |
273 | rcd->rcvhdrq_cnt = rcvhdrcnt; | |
274 | rcd->rcvhdrqentsize = hfi1_hdrq_entsize; | |
275 | /* | |
276 | * Simple Eager buffer allocation: we have already pre-allocated | |
277 | * the number of RcvArray entry groups. Each ctxtdata structure | |
278 | * holds the number of groups for that context. | |
279 | * | |
280 | * To follow CSR requirements and maintain cacheline alignment, | |
281 | * make sure all sizes and bases are multiples of group_size. | |
282 | * | |
283 | * The expected entry count is what is left after assigning | |
284 | * Eager entries. | |
285 | */ | |
286 | max_entries = rcd->rcv_array_groups * | |
287 | dd->rcv_entries.group_size; | |
288 | rcvtids = ((max_entries * hfi1_rcvarr_split) / 100); | |
289 | rcd->egrbufs.count = round_down(rcvtids, | |
290 | dd->rcv_entries.group_size); | |
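/*
 * Example with hypothetical values: rcv_array_groups = 64 and
 * group_size = 8 give max_entries = 512; with the default
 * rcvarr_split of 25, rcvtids = 512 * 25 / 100 = 128, already a
 * multiple of 8, so egrbufs.count = 128.
 */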
291 | if (rcd->egrbufs.count > MAX_EAGER_ENTRIES) { | |
292 | dd_dev_err(dd, "ctxt%u: requested too many RcvArray entries.\n", | |
293 | rcd->ctxt); | |
294 | rcd->egrbufs.count = MAX_EAGER_ENTRIES; | |
295 | } | |
6c63e423 SS |
296 | hfi1_cdbg(PROC, |
297 | "ctxt%u: max Eager buffer RcvArray entries: %u\n", | |
298 | rcd->ctxt, rcd->egrbufs.count); | |
77241056 MM |
299 | |
300 | /* | |
301 | * Allocate array that will hold the eager buffer accounting | |
302 | * data. | |
303 | * This will allocate the maximum possible buffer count based | |
304 | * on the value of the RcvArray split parameter. | |
305 | * The resulting value will be rounded down to the closest | |
306 | * multiple of dd->rcv_entries.group_size. | |
307 | */ | |
314fcc0d SB |
308 | rcd->egrbufs.buffers = kcalloc(rcd->egrbufs.count, |
309 | sizeof(*rcd->egrbufs.buffers), | |
310 | GFP_KERNEL); | |
77241056 MM |
311 | if (!rcd->egrbufs.buffers) |
312 | goto bail; | |
314fcc0d SB |
313 | rcd->egrbufs.rcvtids = kcalloc(rcd->egrbufs.count, |
314 | sizeof(*rcd->egrbufs.rcvtids), | |
315 | GFP_KERNEL); | |
77241056 MM |
316 | if (!rcd->egrbufs.rcvtids) |
317 | goto bail; | |
318 | rcd->egrbufs.size = eager_buffer_size; | |
319 | /* | |
320 | * The size of the buffers programmed into the RcvArray | |
321 | * entries needs to be big enough to handle the highest | |
322 | * MTU supported. | |
323 | */ | |
324 | if (rcd->egrbufs.size < hfi1_max_mtu) { | |
325 | rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu); | |
6c63e423 SS |
326 | hfi1_cdbg(PROC, |
327 | "ctxt%u: eager bufs size too small. Adjusting to %zu\n", | |
77241056 MM |
328 | rcd->ctxt, rcd->egrbufs.size); |
329 | } | |
330 | rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE; | |
331 | ||
332 | if (ctxt < dd->first_user_ctxt) { /* N/A for PSM contexts */ | |
333 | rcd->opstats = kzalloc(sizeof(*rcd->opstats), | |
334 | GFP_KERNEL); | |
806e6e1b | 335 | if (!rcd->opstats) |
77241056 | 336 | goto bail; |
77241056 MM |
337 | } |
338 | } | |
339 | return rcd; | |
340 | bail: | |
341 | kfree(rcd->opstats); | |
342 | kfree(rcd->egrbufs.rcvtids); | |
343 | kfree(rcd->egrbufs.buffers); | |
344 | kfree(rcd); | |
345 | return NULL; | |
346 | } | |
347 | ||
348 | /* | |
349 | * Convert a receive header entry size to the encoding used in the CSR. | |
350 | * | |
351 | * Return zero if the given size is invalid. | |
352 | */ | |
353 | static inline u64 encode_rcv_header_entry_size(u16 size) | |
354 | { | |
355 | /* there are only 3 valid receive header entry sizes */ | |
356 | if (size == 2) | |
357 | return 1; | |
358 | if (size == 16) | |
359 | return 2; | |
360 | if (size == 32) | |
361 | return 4; | |
362 | return 0; /* invalid */ | |
363 | } | |
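/*
 * Worked example, using the sizes listed in the hdrq_entsize module
 * parameter description above (entry sizes are in 32-bit DWs): an
 * entsize of 2 DWs (8B) encodes to 1, 16 DWs (64B, the default)
 * encodes to 2, and 32 DWs (128B) encodes to 4. Any other value
 * encodes to 0 and is rejected by the sanity check in init_one().
 */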
364 | ||
365 | /* | |
366 | * Select the largest ccti value over all SLs to determine the intra- | |
367 | * packet gap for the link. | |
368 | * | |
369 | * called with cca_timer_lock held (to protect access to cca_timer | |
370 | * array), and rcu_read_lock() (to protect access to cc_state). | |
371 | */ | |
372 | void set_link_ipg(struct hfi1_pportdata *ppd) | |
373 | { | |
374 | struct hfi1_devdata *dd = ppd->dd; | |
375 | struct cc_state *cc_state; | |
376 | int i; | |
377 | u16 cce, ccti_limit, max_ccti = 0; | |
378 | u16 shift, mult; | |
379 | u64 src; | |
380 | u32 current_egress_rate; /* Mbits /sec */ | |
381 | u32 max_pkt_time; | |
382 | /* | |
383 | * max_pkt_time is the maximum packet egress time in units | |
384 | * of the fabric clock period 1/(805 MHz). | |
385 | */ | |
386 | ||
387 | cc_state = get_cc_state(ppd); | |
388 | ||
389 | if (cc_state == NULL) | |
390 | /* | |
391 | * This should _never_ happen - rcu_read_lock() is held, | |
392 | * and set_link_ipg() should not be called if cc_state | |
393 | * is NULL. | |
394 | */ | |
395 | return; | |
396 | ||
397 | for (i = 0; i < OPA_MAX_SLS; i++) { | |
398 | u16 ccti = ppd->cca_timer[i].ccti; | |
399 | ||
400 | if (ccti > max_ccti) | |
401 | max_ccti = ccti; | |
402 | } | |
403 | ||
404 | ccti_limit = cc_state->cct.ccti_limit; | |
405 | if (max_ccti > ccti_limit) | |
406 | max_ccti = ccti_limit; | |
407 | ||
408 | cce = cc_state->cct.entries[max_ccti].entry; | |
409 | shift = (cce & 0xc000) >> 14; | |
410 | mult = (cce & 0x3fff); | |
411 | ||
412 | current_egress_rate = active_egress_rate(ppd); | |
413 | ||
414 | max_pkt_time = egress_cycles(ppd->ibmaxlen, current_egress_rate); | |
415 | ||
416 | src = (max_pkt_time >> shift) * mult; | |
417 | ||
418 | src &= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SMASK; | |
419 | src <<= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SHIFT; | |
420 | ||
421 | write_csr(dd, SEND_STATIC_RATE_CONTROL, src); | |
422 | } | |
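/*
 * Example decode of a CCT entry, using a hypothetical value:
 * cce = 0x8064 gives shift = (0x8064 & 0xc000) >> 14 = 2 and
 * mult = 0x8064 & 0x3fff = 100, so a max_pkt_time of 4000 fabric
 * clocks yields src = (4000 >> 2) * 100 = 100000 before it is
 * masked and shifted into SEND_STATIC_RATE_CONTROL.
 */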
423 | ||
424 | static enum hrtimer_restart cca_timer_fn(struct hrtimer *t) | |
425 | { | |
426 | struct cca_timer *cca_timer; | |
427 | struct hfi1_pportdata *ppd; | |
428 | int sl; | |
429 | u16 ccti, ccti_timer, ccti_min; | |
430 | struct cc_state *cc_state; | |
b77d713a | 431 | unsigned long flags; |
77241056 MM |
432 | |
433 | cca_timer = container_of(t, struct cca_timer, hrtimer); | |
434 | ppd = cca_timer->ppd; | |
435 | sl = cca_timer->sl; | |
436 | ||
437 | rcu_read_lock(); | |
438 | ||
439 | cc_state = get_cc_state(ppd); | |
440 | ||
441 | if (cc_state == NULL) { | |
442 | rcu_read_unlock(); | |
443 | return HRTIMER_NORESTART; | |
444 | } | |
445 | ||
446 | /* | |
447 | * 1) decrement ccti for SL | |
448 | * 2) calculate IPG for link (set_link_ipg()) | |
449 | * 3) restart timer, unless ccti is at min value | |
450 | */ | |
451 | ||
452 | ccti_min = cc_state->cong_setting.entries[sl].ccti_min; | |
453 | ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer; | |
454 | ||
b77d713a | 455 | spin_lock_irqsave(&ppd->cca_timer_lock, flags); |
77241056 MM |
456 | |
457 | ccti = cca_timer->ccti; | |
458 | ||
459 | if (ccti > ccti_min) { | |
460 | cca_timer->ccti--; | |
461 | set_link_ipg(ppd); | |
462 | } | |
463 | ||
b77d713a | 464 | spin_unlock_irqrestore(&ppd->cca_timer_lock, flags); |
77241056 MM |
465 | |
466 | rcu_read_unlock(); | |
467 | ||
468 | if (ccti > ccti_min) { | |
469 | unsigned long nsec = 1024 * ccti_timer; | |
470 | /* ccti_timer is in units of 1.024 usec */ | |
471 | hrtimer_forward_now(t, ns_to_ktime(nsec)); | |
472 | return HRTIMER_RESTART; | |
473 | } | |
474 | return HRTIMER_NORESTART; | |
475 | } | |
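/*
 * Example of the restart interval, with a hypothetical value:
 * ccti_timer is in units of 1.024 usec, so ccti_timer = 100 gives
 * nsec = 1024 * 100 = 102400 ns, i.e. the CCTI for that SL is
 * decremented roughly every 102.4 usec until it reaches ccti_min.
 */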
476 | ||
477 | /* | |
478 | * Common code for initializing the physical port structure. | |
479 | */ | |
480 | void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd, | |
481 | struct hfi1_devdata *dd, u8 hw_pidx, u8 port) | |
482 | { | |
483 | int i, size; | |
484 | uint default_pkey_idx; | |
485 | ||
486 | ppd->dd = dd; | |
487 | ppd->hw_pidx = hw_pidx; | |
488 | ppd->port = port; /* IB port number, not index */ | |
489 | ||
490 | default_pkey_idx = 1; | |
491 | ||
492 | ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY; | |
493 | if (loopback) { | |
494 | hfi1_early_err(&pdev->dev, | |
495 | "Faking data partition 0x8001 in idx %u\n", | |
496 | !default_pkey_idx); | |
497 | ppd->pkeys[!default_pkey_idx] = 0x8001; | |
498 | } | |
499 | ||
500 | INIT_WORK(&ppd->link_vc_work, handle_verify_cap); | |
501 | INIT_WORK(&ppd->link_up_work, handle_link_up); | |
502 | INIT_WORK(&ppd->link_down_work, handle_link_down); | |
cbac386a | 503 | INIT_WORK(&ppd->dc_host_req_work, handle_8051_request); |
77241056 MM |
504 | INIT_WORK(&ppd->freeze_work, handle_freeze); |
505 | INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade); | |
506 | INIT_WORK(&ppd->sma_message_work, handle_sma_message); | |
507 | INIT_WORK(&ppd->link_bounce_work, handle_link_bounce); | |
fb9036dd | 508 | INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work); |
8ebd4cf1 EH |
509 | INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event); |
510 | ||
77241056 MM |
511 | mutex_init(&ppd->hls_lock); |
512 | spin_lock_init(&ppd->sdma_alllock); | |
513 | spin_lock_init(&ppd->qsfp_info.qsfp_lock); | |
514 | ||
8ebd4cf1 | 515 | ppd->qsfp_info.ppd = ppd; |
77241056 MM |
516 | ppd->sm_trap_qp = 0x0; |
517 | ppd->sa_qp = 0x1; | |
518 | ||
519 | ppd->hfi1_wq = NULL; | |
520 | ||
521 | spin_lock_init(&ppd->cca_timer_lock); | |
522 | ||
523 | for (i = 0; i < OPA_MAX_SLS; i++) { | |
524 | hrtimer_init(&ppd->cca_timer[i].hrtimer, CLOCK_MONOTONIC, | |
525 | HRTIMER_MODE_REL); | |
526 | ppd->cca_timer[i].ppd = ppd; | |
527 | ppd->cca_timer[i].sl = i; | |
528 | ppd->cca_timer[i].ccti = 0; | |
529 | ppd->cca_timer[i].hrtimer.function = cca_timer_fn; | |
530 | } | |
531 | ||
532 | ppd->cc_max_table_entries = IB_CC_TABLE_CAP_DEFAULT; | |
533 | ||
534 | spin_lock_init(&ppd->cc_state_lock); | |
535 | spin_lock_init(&ppd->cc_log_lock); | |
536 | size = sizeof(struct cc_state); | |
537 | RCU_INIT_POINTER(ppd->cc_state, kzalloc(size, GFP_KERNEL)); | |
538 | if (!rcu_dereference(ppd->cc_state)) | |
539 | goto bail; | |
540 | return; | |
541 | ||
542 | bail: | |
543 | ||
544 | hfi1_early_err(&pdev->dev, | |
545 | "Congestion Control Agent disabled for port %d\n", port); | |
546 | } | |
547 | ||
548 | /* | |
549 | * Do initialization for device that is only needed on | |
550 | * first detect, not on resets. | |
551 | */ | |
552 | static int loadtime_init(struct hfi1_devdata *dd) | |
553 | { | |
554 | return 0; | |
555 | } | |
556 | ||
557 | /** | |
558 | * init_after_reset - re-initialize after a reset | |
559 | * @dd: the hfi1_ib device | |
560 | * | |
561 | * sanity check at least some of the values after reset, and | |
562 | * ensure no receive or transmit (explicitly, in case reset | |
563 | * failed). | |
564 | */ | |
565 | static int init_after_reset(struct hfi1_devdata *dd) | |
566 | { | |
567 | int i; | |
568 | ||
569 | /* | |
570 | * Ensure chip does no sends or receives, tail updates, or | |
571 | * pioavail updates while we re-initialize. This is mostly | |
572 | * for the driver data structures, not chip registers. | |
573 | */ | |
574 | for (i = 0; i < dd->num_rcv_contexts; i++) | |
575 | hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS | | |
576 | HFI1_RCVCTRL_INTRAVAIL_DIS | | |
577 | HFI1_RCVCTRL_TAILUPD_DIS, i); | |
578 | pio_send_control(dd, PSC_GLOBAL_DISABLE); | |
579 | for (i = 0; i < dd->num_send_contexts; i++) | |
580 | sc_disable(dd->send_contexts[i].sc); | |
581 | ||
582 | return 0; | |
583 | } | |
584 | ||
585 | static void enable_chip(struct hfi1_devdata *dd) | |
586 | { | |
587 | u32 rcvmask; | |
588 | u32 i; | |
589 | ||
590 | /* enable PIO send */ | |
591 | pio_send_control(dd, PSC_GLOBAL_ENABLE); | |
592 | ||
593 | /* | |
594 | * Enable kernel ctxts' receive and receive interrupt. | |
595 | * Other ctxts done as user opens and initializes them. | |
596 | */ | |
77241056 | 597 | for (i = 0; i < dd->first_user_ctxt; ++i) { |
566c157c | 598 | rcvmask = HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB; |
77241056 MM |
599 | rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ? |
600 | HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS; | |
601 | if (!HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, MULTI_PKT_EGR)) | |
602 | rcvmask |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB; | |
603 | if (HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, NODROP_RHQ_FULL)) | |
604 | rcvmask |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB; | |
605 | if (HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, NODROP_EGR_FULL)) | |
606 | rcvmask |= HFI1_RCVCTRL_NO_EGR_DROP_ENB; | |
607 | hfi1_rcvctrl(dd, rcvmask, i); | |
608 | sc_enable(dd->rcd[i]->sc); | |
609 | } | |
610 | } | |
611 | ||
612 | /** | |
613 | * create_workqueues - create per port workqueues | |
614 | * @dd: the hfi1_ib device | |
615 | */ | |
616 | static int create_workqueues(struct hfi1_devdata *dd) | |
617 | { | |
618 | int pidx; | |
619 | struct hfi1_pportdata *ppd; | |
620 | ||
621 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | |
622 | ppd = dd->pport + pidx; | |
623 | if (!ppd->hfi1_wq) { | |
77241056 | 624 | ppd->hfi1_wq = |
0a226edd MM |
625 | alloc_workqueue( |
626 | "hfi%d_%d", | |
627 | WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE, | |
628 | dd->num_sdma, | |
629 | dd->unit, pidx); | |
77241056 MM |
630 | if (!ppd->hfi1_wq) |
631 | goto wq_error; | |
632 | } | |
633 | } | |
634 | return 0; | |
635 | wq_error: | |
0a226edd | 636 | pr_err("alloc_workqueue failed for port %d\n", pidx + 1); |
77241056 MM |
637 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { |
638 | ppd = dd->pport + pidx; | |
639 | if (ppd->hfi1_wq) { | |
640 | destroy_workqueue(ppd->hfi1_wq); | |
641 | ppd->hfi1_wq = NULL; | |
642 | } | |
643 | } | |
644 | return -ENOMEM; | |
645 | } | |
646 | ||
647 | /** | |
648 | * hfi1_init - do the actual initialization sequence on the chip | |
649 | * @dd: the hfi1_ib device | |
650 | * @reinit: re-initializing, so don't allocate new memory | |
651 | * | |
652 | * Do the actual initialization sequence on the chip. This is done | |
653 | * both from the init routine called from the PCI infrastructure, and | |
654 | * when we reset the chip, or detect that it was reset internally, | |
655 | * or it's administratively re-enabled. | |
656 | * | |
657 | * Memory allocation here and in called routines is only done in | |
658 | * the first case (reinit == 0). We have to be careful, because even | |
659 | * without memory allocation, we need to re-write all the chip registers, | |
660 | * TIDs, etc. after the reset or enable has completed. | |
661 | */ | |
662 | int hfi1_init(struct hfi1_devdata *dd, int reinit) | |
663 | { | |
664 | int ret = 0, pidx, lastfail = 0; | |
665 | unsigned i, len; | |
666 | struct hfi1_ctxtdata *rcd; | |
667 | struct hfi1_pportdata *ppd; | |
668 | ||
669 | /* Set up recv low level handlers */ | |
670 | dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EXPECTED] = | |
671 | kdeth_process_expected; | |
672 | dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EAGER] = | |
673 | kdeth_process_eager; | |
674 | dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_IB] = process_receive_ib; | |
675 | dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_ERROR] = | |
676 | process_receive_error; | |
677 | dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_BYPASS] = | |
678 | process_receive_bypass; | |
679 | dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID5] = | |
680 | process_receive_invalid; | |
681 | dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID6] = | |
682 | process_receive_invalid; | |
683 | dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID7] = | |
684 | process_receive_invalid; | |
685 | dd->rhf_rcv_function_map = dd->normal_rhf_rcv_functions; | |
686 | ||
687 | /* Set up send low level handlers */ | |
688 | dd->process_pio_send = hfi1_verbs_send_pio; | |
689 | dd->process_dma_send = hfi1_verbs_send_dma; | |
690 | dd->pio_inline_send = pio_copy; | |
691 | ||
995deafa | 692 | if (is_ax(dd)) { |
77241056 MM |
693 | atomic_set(&dd->drop_packet, DROP_PACKET_ON); |
694 | dd->do_drop = 1; | |
695 | } else { | |
696 | atomic_set(&dd->drop_packet, DROP_PACKET_OFF); | |
697 | dd->do_drop = 0; | |
698 | } | |
699 | ||
700 | /* make sure the link is not "up" */ | |
701 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | |
702 | ppd = dd->pport + pidx; | |
703 | ppd->linkup = 0; | |
704 | } | |
705 | ||
706 | if (reinit) | |
707 | ret = init_after_reset(dd); | |
708 | else | |
709 | ret = loadtime_init(dd); | |
710 | if (ret) | |
711 | goto done; | |
712 | ||
46b010d3 MB |
713 | /* allocate dummy tail memory for all receive contexts */ |
714 | dd->rcvhdrtail_dummy_kvaddr = dma_zalloc_coherent( | |
715 | &dd->pcidev->dev, sizeof(u64), | |
716 | &dd->rcvhdrtail_dummy_physaddr, | |
717 | GFP_KERNEL); | |
718 | ||
719 | if (!dd->rcvhdrtail_dummy_kvaddr) { | |
720 | dd_dev_err(dd, "cannot allocate dummy tail memory\n"); | |
721 | ret = -ENOMEM; | |
722 | goto done; | |
723 | } | |
724 | ||
77241056 MM |
725 | /* dd->rcd can be NULL if early initialization failed */ |
726 | for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) { | |
727 | /* | |
728 | * Set up the (kernel) rcvhdr queue and egr TIDs. If doing | |
729 | * re-init, the simplest way to handle this is to free | |
730 | * existing, and re-allocate. | |
731 | * Need to re-create rest of ctxt 0 ctxtdata as well. | |
732 | */ | |
733 | rcd = dd->rcd[i]; | |
734 | if (!rcd) | |
735 | continue; | |
736 | ||
737 | rcd->do_interrupt = &handle_receive_interrupt; | |
738 | ||
739 | lastfail = hfi1_create_rcvhdrq(dd, rcd); | |
740 | if (!lastfail) | |
741 | lastfail = hfi1_setup_eagerbufs(rcd); | |
742 | if (lastfail) | |
743 | dd_dev_err(dd, | |
744 | "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n"); | |
745 | } | |
746 | if (lastfail) | |
747 | ret = lastfail; | |
748 | ||
749 | /* Allocate enough memory for user event notification. */ | |
750 | len = ALIGN(dd->chip_rcv_contexts * HFI1_MAX_SHARED_CTXTS * | |
751 | sizeof(*dd->events), PAGE_SIZE); | |
752 | dd->events = vmalloc_user(len); | |
753 | if (!dd->events) | |
754 | dd_dev_err(dd, "Failed to allocate user events page\n"); | |
755 | /* | |
756 | * Allocate a page for device and port status. | |
757 | * Page will be shared amongst all user processes. | |
758 | */ | |
759 | dd->status = vmalloc_user(PAGE_SIZE); | |
760 | if (!dd->status) | |
761 | dd_dev_err(dd, "Failed to allocate dev status page\n"); | |
762 | else | |
763 | dd->freezelen = PAGE_SIZE - (sizeof(*dd->status) - | |
764 | sizeof(dd->status->freezemsg)); | |
765 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | |
766 | ppd = dd->pport + pidx; | |
767 | if (dd->status) | |
768 | /* Currently, we only have one port */ | |
769 | ppd->statusp = &dd->status->port; | |
770 | ||
771 | set_mtu(ppd); | |
772 | } | |
773 | ||
774 | /* enable chip even if we have an error, so we can debug cause */ | |
775 | enable_chip(dd); | |
776 | ||
77241056 MM |
777 | done: |
778 | /* | |
779 | * Set status even if port serdes is not initialized | |
780 | * so that diags will work. | |
781 | */ | |
782 | if (dd->status) | |
783 | dd->status->dev |= HFI1_STATUS_CHIP_PRESENT | | |
784 | HFI1_STATUS_INITTED; | |
785 | if (!ret) { | |
786 | /* enable all interrupts from the chip */ | |
787 | set_intr_state(dd, 1); | |
788 | ||
789 | /* chip is OK for user apps; mark it as initialized */ | |
790 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | |
791 | ppd = dd->pport + pidx; | |
792 | ||
77241056 MM |
793 | /* start the serdes - must be after interrupts are |
794 | * enabled so we are notified when the link goes up */ | |
795 | lastfail = bringup_serdes(ppd); | |
796 | if (lastfail) | |
797 | dd_dev_info(dd, | |
798 | "Failed to bring up port %u\n", | |
799 | ppd->port); | |
800 | ||
801 | /* | |
802 | * Set status even if port serdes is not initialized | |
803 | * so that diags will work. | |
804 | */ | |
805 | if (ppd->statusp) | |
806 | *ppd->statusp |= HFI1_STATUS_CHIP_PRESENT | | |
807 | HFI1_STATUS_INITTED; | |
808 | if (!ppd->link_speed_enabled) | |
809 | continue; | |
810 | } | |
811 | } | |
812 | ||
813 | /* if ret is non-zero, we probably should do some cleanup here... */ | |
814 | return ret; | |
815 | } | |
816 | ||
817 | static inline struct hfi1_devdata *__hfi1_lookup(int unit) | |
818 | { | |
819 | return idr_find(&hfi1_unit_table, unit); | |
820 | } | |
821 | ||
822 | struct hfi1_devdata *hfi1_lookup(int unit) | |
823 | { | |
824 | struct hfi1_devdata *dd; | |
825 | unsigned long flags; | |
826 | ||
827 | spin_lock_irqsave(&hfi1_devs_lock, flags); | |
828 | dd = __hfi1_lookup(unit); | |
829 | spin_unlock_irqrestore(&hfi1_devs_lock, flags); | |
830 | ||
831 | return dd; | |
832 | } | |
833 | ||
834 | /* | |
835 | * Stop the timers during unit shutdown, or after an error late | |
836 | * in initialization. | |
837 | */ | |
838 | static void stop_timers(struct hfi1_devdata *dd) | |
839 | { | |
840 | struct hfi1_pportdata *ppd; | |
841 | int pidx; | |
842 | ||
843 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | |
844 | ppd = dd->pport + pidx; | |
845 | if (ppd->led_override_timer.data) { | |
846 | del_timer_sync(&ppd->led_override_timer); | |
847 | atomic_set(&ppd->led_override_timer_active, 0); | |
848 | } | |
849 | } | |
850 | } | |
851 | ||
852 | /** | |
853 | * shutdown_device - shut down a device | |
854 | * @dd: the hfi1_ib device | |
855 | * | |
856 | * This is called to make the device quiet when we are about to | |
857 | * unload the driver, and also when the device is administratively | |
858 | * disabled. It does not free any data structures. | |
859 | * Everything it does has to be set up again by hfi1_init(dd, 1) | |
860 | */ | |
861 | static void shutdown_device(struct hfi1_devdata *dd) | |
862 | { | |
863 | struct hfi1_pportdata *ppd; | |
864 | unsigned pidx; | |
865 | int i; | |
866 | ||
867 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | |
868 | ppd = dd->pport + pidx; | |
869 | ||
870 | ppd->linkup = 0; | |
871 | if (ppd->statusp) | |
872 | *ppd->statusp &= ~(HFI1_STATUS_IB_CONF | | |
873 | HFI1_STATUS_IB_READY); | |
874 | } | |
875 | dd->flags &= ~HFI1_INITTED; | |
876 | ||
877 | /* mask interrupts, but not errors */ | |
878 | set_intr_state(dd, 0); | |
879 | ||
880 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | |
881 | ppd = dd->pport + pidx; | |
882 | for (i = 0; i < dd->num_rcv_contexts; i++) | |
883 | hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS | | |
884 | HFI1_RCVCTRL_CTXT_DIS | | |
885 | HFI1_RCVCTRL_INTRAVAIL_DIS | | |
886 | HFI1_RCVCTRL_PKEY_DIS | | |
887 | HFI1_RCVCTRL_ONE_PKT_EGR_DIS, i); | |
888 | /* | |
889 | * Gracefully stop all sends allowing any in progress to | |
890 | * trickle out first. | |
891 | */ | |
892 | for (i = 0; i < dd->num_send_contexts; i++) | |
893 | sc_flush(dd->send_contexts[i].sc); | |
894 | } | |
895 | ||
896 | /* | |
897 | * Enough for anything that's going to trickle out to have actually | |
898 | * done so. | |
899 | */ | |
900 | udelay(20); | |
901 | ||
902 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | |
903 | ppd = dd->pport + pidx; | |
904 | ||
905 | /* disable all contexts */ | |
906 | for (i = 0; i < dd->num_send_contexts; i++) | |
907 | sc_disable(dd->send_contexts[i].sc); | |
908 | /* disable the send device */ | |
909 | pio_send_control(dd, PSC_GLOBAL_DISABLE); | |
910 | ||
91ab4ed3 EH |
911 | shutdown_led_override(ppd); |
912 | ||
77241056 MM |
913 | /* |
914 | * Clear SerdesEnable. | |
915 | * We can't count on interrupts since we are stopping. | |
916 | */ | |
917 | hfi1_quiet_serdes(ppd); | |
918 | ||
919 | if (ppd->hfi1_wq) { | |
920 | destroy_workqueue(ppd->hfi1_wq); | |
921 | ppd->hfi1_wq = NULL; | |
922 | } | |
923 | } | |
924 | sdma_exit(dd); | |
925 | } | |
926 | ||
927 | /** | |
928 | * hfi1_free_ctxtdata - free a context's allocated data | |
929 | * @dd: the hfi1_ib device | |
930 | * @rcd: the ctxtdata structure | |
931 | * | |
932 | * free up any allocated data for a context. | |
933 | * This should not touch anything that would affect a simultaneous | |
934 | * re-allocation of context data, because it is called after hfi1_mutex | |
935 | * is released (and can be called from reinit as well). | |
936 | * It should never change any chip state, or global driver state. | |
937 | */ | |
938 | void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) | |
939 | { | |
940 | unsigned e; | |
941 | ||
942 | if (!rcd) | |
943 | return; | |
944 | ||
945 | if (rcd->rcvhdrq) { | |
946 | dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size, | |
947 | rcd->rcvhdrq, rcd->rcvhdrq_phys); | |
948 | rcd->rcvhdrq = NULL; | |
949 | if (rcd->rcvhdrtail_kvaddr) { | |
950 | dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE, | |
951 | (void *)rcd->rcvhdrtail_kvaddr, | |
952 | rcd->rcvhdrqtailaddr_phys); | |
953 | rcd->rcvhdrtail_kvaddr = NULL; | |
954 | } | |
955 | } | |
956 | ||
957 | /* all the RcvArray entries should have been cleared by now */ | |
958 | kfree(rcd->egrbufs.rcvtids); | |
959 | ||
960 | for (e = 0; e < rcd->egrbufs.alloced; e++) { | |
961 | if (rcd->egrbufs.buffers[e].phys) | |
962 | dma_free_coherent(&dd->pcidev->dev, | |
963 | rcd->egrbufs.buffers[e].len, | |
964 | rcd->egrbufs.buffers[e].addr, | |
965 | rcd->egrbufs.buffers[e].phys); | |
966 | } | |
967 | kfree(rcd->egrbufs.buffers); | |
968 | ||
969 | sc_free(rcd->sc); | |
77241056 MM |
970 | vfree(rcd->user_event_mask); |
971 | vfree(rcd->subctxt_uregbase); | |
972 | vfree(rcd->subctxt_rcvegrbuf); | |
973 | vfree(rcd->subctxt_rcvhdr_base); | |
77241056 MM |
974 | kfree(rcd->opstats); |
975 | kfree(rcd); | |
976 | } | |
977 | ||
978 | void hfi1_free_devdata(struct hfi1_devdata *dd) | |
979 | { | |
980 | unsigned long flags; | |
981 | ||
982 | spin_lock_irqsave(&hfi1_devs_lock, flags); | |
983 | idr_remove(&hfi1_unit_table, dd->unit); | |
984 | list_del(&dd->list); | |
985 | spin_unlock_irqrestore(&hfi1_devs_lock, flags); | |
986 | hfi1_dbg_ibdev_exit(&dd->verbs_dev); | |
987 | rcu_barrier(); /* wait for rcu callbacks to complete */ | |
988 | free_percpu(dd->int_counter); | |
989 | free_percpu(dd->rcv_limit); | |
957558c9 | 990 | hfi1_dev_affinity_free(dd); |
89abfc8d | 991 | free_percpu(dd->send_schedule); |
ec3f2c12 | 992 | ib_dealloc_device(&dd->verbs_dev.rdi.ibdev); |
77241056 MM |
993 | } |
994 | ||
995 | /* | |
996 | * Allocate our primary per-unit data structure. Must be done via verbs | |
997 | * allocator, because the verbs cleanup process both cleans up and | |
998 | * frees the data structure. | |
999 | * "extra" is for chip-specific data. | |
1000 | * | |
1001 | * Use the idr mechanism to get a unit number for this unit. | |
1002 | */ | |
1003 | struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra) | |
1004 | { | |
1005 | unsigned long flags; | |
1006 | struct hfi1_devdata *dd; | |
7af6d006 DD |
1007 | int ret, nports; |
1008 | ||
1009 | /* extra is sizeof(struct hfi1_pportdata) * number of ports */ | |
1010 | nports = extra / sizeof(struct hfi1_pportdata); | |
77241056 | 1011 | |
7af6d006 DD |
1012 | dd = (struct hfi1_devdata *)rvt_alloc_device(sizeof(*dd) + extra, |
1013 | nports); | |
77241056 MM |
1014 | if (!dd) |
1015 | return ERR_PTR(-ENOMEM); | |
7af6d006 | 1016 | dd->num_pports = nports; |
77241056 MM |
1017 | dd->pport = (struct hfi1_pportdata *)(dd + 1); |
1018 | ||
1019 | INIT_LIST_HEAD(&dd->list); | |
77241056 MM |
1020 | idr_preload(GFP_KERNEL); |
1021 | spin_lock_irqsave(&hfi1_devs_lock, flags); | |
1022 | ||
1023 | ret = idr_alloc(&hfi1_unit_table, dd, 0, 0, GFP_NOWAIT); | |
1024 | if (ret >= 0) { | |
1025 | dd->unit = ret; | |
1026 | list_add(&dd->list, &hfi1_dev_list); | |
1027 | } | |
1028 | ||
1029 | spin_unlock_irqrestore(&hfi1_devs_lock, flags); | |
1030 | idr_preload_end(); | |
1031 | ||
1032 | if (ret < 0) { | |
1033 | hfi1_early_err(&pdev->dev, | |
1034 | "Could not allocate unit ID: error %d\n", -ret); | |
1035 | goto bail; | |
1036 | } | |
1037 | /* | |
1038 | * Initialize all locks for the device. This needs to be as early as | |
1039 | * possible so locks are usable. | |
1040 | */ | |
1041 | spin_lock_init(&dd->sc_lock); | |
1042 | spin_lock_init(&dd->sendctrl_lock); | |
1043 | spin_lock_init(&dd->rcvctrl_lock); | |
1044 | spin_lock_init(&dd->uctxt_lock); | |
1045 | spin_lock_init(&dd->hfi1_diag_trans_lock); | |
1046 | spin_lock_init(&dd->sc_init_lock); | |
1047 | spin_lock_init(&dd->dc8051_lock); | |
1048 | spin_lock_init(&dd->dc8051_memlock); | |
1049 | mutex_init(&dd->qsfp_i2c_mutex); | |
1050 | seqlock_init(&dd->sc2vl_lock); | |
1051 | spin_lock_init(&dd->sde_map_lock); | |
1052 | init_waitqueue_head(&dd->event_queue); | |
1053 | ||
1054 | dd->int_counter = alloc_percpu(u64); | |
1055 | if (!dd->int_counter) { | |
1056 | ret = -ENOMEM; | |
1057 | hfi1_early_err(&pdev->dev, | |
1058 | "Could not allocate per-cpu int_counter\n"); | |
1059 | goto bail; | |
1060 | } | |
1061 | ||
1062 | dd->rcv_limit = alloc_percpu(u64); | |
1063 | if (!dd->rcv_limit) { | |
1064 | ret = -ENOMEM; | |
1065 | hfi1_early_err(&pdev->dev, | |
1066 | "Could not allocate per-cpu rcv_limit\n"); | |
1067 | goto bail; | |
1068 | } | |
1069 | ||
89abfc8d VM |
1070 | dd->send_schedule = alloc_percpu(u64); |
1071 | if (!dd->send_schedule) { | |
1072 | ret = -ENOMEM; | |
1073 | hfi1_early_err(&pdev->dev, | |
1074 | "Could not allocate per-cpu send_schedule\n"); | |
1075 | goto bail; | |
1076 | } | |
1077 | ||
77241056 MM |
1078 | if (!hfi1_cpulist_count) { |
1079 | u32 count = num_online_cpus(); | |
1080 | ||
314fcc0d SB |
1081 | hfi1_cpulist = kcalloc(BITS_TO_LONGS(count), sizeof(long), |
1082 | GFP_KERNEL); | |
77241056 MM |
1083 | if (hfi1_cpulist) |
1084 | hfi1_cpulist_count = count; | |
1085 | else | |
1086 | hfi1_early_err( | |
1087 | &pdev->dev, | |
1088 | "Could not alloc cpulist info, cpu affinity might be wrong\n"); | |
1089 | } | |
1090 | hfi1_dbg_ibdev_init(&dd->verbs_dev); | |
1091 | return dd; | |
1092 | ||
1093 | bail: | |
1094 | if (!list_empty(&dd->list)) | |
1095 | list_del_init(&dd->list); | |
ec3f2c12 | 1096 | ib_dealloc_device(&dd->verbs_dev.rdi.ibdev); |
77241056 MM |
1097 | return ERR_PTR(ret); |
1098 | } | |
1099 | ||
1100 | /* | |
1101 | * Called from freeze mode handlers, and from PCI error | |
1102 | * reporting code. Should be paranoid about state of | |
1103 | * system and data structures. | |
1104 | */ | |
1105 | void hfi1_disable_after_error(struct hfi1_devdata *dd) | |
1106 | { | |
1107 | if (dd->flags & HFI1_INITTED) { | |
1108 | u32 pidx; | |
1109 | ||
1110 | dd->flags &= ~HFI1_INITTED; | |
1111 | if (dd->pport) | |
1112 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | |
1113 | struct hfi1_pportdata *ppd; | |
1114 | ||
1115 | ppd = dd->pport + pidx; | |
1116 | if (dd->flags & HFI1_PRESENT) | |
1117 | set_link_state(ppd, HLS_DN_DISABLE); | |
1118 | ||
1119 | if (ppd->statusp) | |
1120 | *ppd->statusp &= ~HFI1_STATUS_IB_READY; | |
1121 | } | |
1122 | } | |
1123 | ||
1124 | /* | |
1125 | * Mark as having had an error for driver, and also | |
1126 | * for /sys and status word mapped to user programs. | |
1127 | * This marks the unit as not usable, until reset. | |
1128 | */ | |
1129 | if (dd->status) | |
1130 | dd->status->dev |= HFI1_STATUS_HWERROR; | |
1131 | } | |
1132 | ||
1133 | static void remove_one(struct pci_dev *); | |
1134 | static int init_one(struct pci_dev *, const struct pci_device_id *); | |
1135 | ||
1136 | #define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: " | |
1137 | #define PFX DRIVER_NAME ": " | |
1138 | ||
1139 | static const struct pci_device_id hfi1_pci_tbl[] = { | |
1140 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL0) }, | |
1141 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL1) }, | |
1142 | { 0, } | |
1143 | }; | |
1144 | ||
1145 | MODULE_DEVICE_TABLE(pci, hfi1_pci_tbl); | |
1146 | ||
1147 | static struct pci_driver hfi1_pci_driver = { | |
1148 | .name = DRIVER_NAME, | |
1149 | .probe = init_one, | |
1150 | .remove = remove_one, | |
1151 | .id_table = hfi1_pci_tbl, | |
1152 | .err_handler = &hfi1_pci_err_handler, | |
1153 | }; | |
1154 | ||
1155 | static void __init compute_krcvqs(void) | |
1156 | { | |
1157 | int i; | |
1158 | ||
1159 | for (i = 0; i < krcvqsset; i++) | |
1160 | n_krcvqs += krcvqs[i]; | |
1161 | } | |
1162 | ||
1163 | /* | |
1164 | * Do all the generic driver unit- and chip-independent memory | |
1165 | * allocation and initialization. | |
1166 | */ | |
1167 | static int __init hfi1_mod_init(void) | |
1168 | { | |
1169 | int ret; | |
1170 | ||
1171 | ret = dev_init(); | |
1172 | if (ret) | |
1173 | goto bail; | |
1174 | ||
1175 | /* validate max MTU before any devices start */ | |
1176 | if (!valid_opa_max_mtu(hfi1_max_mtu)) { | |
1177 | pr_err("Invalid max_mtu 0x%x, using 0x%x instead\n", | |
1178 | hfi1_max_mtu, HFI1_DEFAULT_MAX_MTU); | |
1179 | hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU; | |
1180 | } | |
1181 | /* valid CUs run from 1-128 in powers of 2 */ | |
1182 | if (hfi1_cu > 128 || !is_power_of_2(hfi1_cu)) | |
1183 | hfi1_cu = 1; | |
1184 | /* valid credit return threshold is 0-100, variable is unsigned */ | |
1185 | if (user_credit_return_threshold > 100) | |
1186 | user_credit_return_threshold = 100; | |
1187 | ||
1188 | compute_krcvqs(); | |
1189 | /* sanitize receive interrupt count/time; must wait until after | |
1190 | * the hardware type is known */ |
1191 | if (rcv_intr_count > RCV_HDR_HEAD_COUNTER_MASK) | |
1192 | rcv_intr_count = RCV_HDR_HEAD_COUNTER_MASK; | |
1193 | /* reject invalid combinations */ | |
1194 | if (rcv_intr_count == 0 && rcv_intr_timeout == 0) { | |
1195 | pr_err("Invalid mode: both receive interrupt count and available timeout are zero - setting interrupt count to 1\n"); | |
1196 | rcv_intr_count = 1; | |
1197 | } | |
1198 | if (rcv_intr_count > 1 && rcv_intr_timeout == 0) { | |
1199 | /* | |
1200 | * Avoid indefinite packet delivery by requiring a timeout | |
1201 | * if count is > 1. | |
1202 | */ | |
1203 | pr_err("Invalid mode: receive interrupt count greater than 1 and available timeout is zero - setting available timeout to 1\n"); | |
1204 | rcv_intr_timeout = 1; | |
1205 | } | |
1206 | if (rcv_intr_dynamic && !(rcv_intr_count > 1 && rcv_intr_timeout > 0)) { | |
1207 | /* | |
1208 | * The dynamic algorithm expects a non-zero timeout | |
1209 | * and a count > 1. | |
1210 | */ | |
1211 | pr_err("Invalid mode: dynamic receive interrupt mitigation with invalid count and timeout - turning dynamic off\n"); | |
1212 | rcv_intr_dynamic = 0; | |
1213 | } | |
1214 | ||
1215 | /* sanitize link CRC options */ | |
1216 | link_crc_mask &= SUPPORTED_CRCS; | |
1217 | ||
1218 | /* | |
1219 | * These must be called before the driver is registered with | |
1220 | * the PCI subsystem. | |
1221 | */ | |
1222 | idr_init(&hfi1_unit_table); | |
1223 | ||
1224 | hfi1_dbg_init(); | |
1225 | ret = pci_register_driver(&hfi1_pci_driver); | |
1226 | if (ret < 0) { | |
1227 | pr_err("Unable to register driver: error %d\n", -ret); | |
1228 | goto bail_dev; | |
1229 | } | |
1230 | goto bail; /* all OK */ | |
1231 | ||
1232 | bail_dev: | |
1233 | hfi1_dbg_exit(); | |
1234 | idr_destroy(&hfi1_unit_table); | |
1235 | dev_cleanup(); | |
1236 | bail: | |
1237 | return ret; | |
1238 | } | |
1239 | ||
1240 | module_init(hfi1_mod_init); | |
1241 | ||
1242 | /* | |
1243 | * Do the non-unit driver cleanup, memory free, etc. at unload. | |
1244 | */ | |
1245 | static void __exit hfi1_mod_cleanup(void) | |
1246 | { | |
1247 | pci_unregister_driver(&hfi1_pci_driver); | |
1248 | hfi1_dbg_exit(); | |
1249 | hfi1_cpulist_count = 0; | |
1250 | kfree(hfi1_cpulist); | |
1251 | ||
1252 | idr_destroy(&hfi1_unit_table); | |
1253 | dispose_firmware(); /* asymmetric with obtain_firmware() */ | |
1254 | dev_cleanup(); | |
1255 | } | |
1256 | ||
1257 | module_exit(hfi1_mod_cleanup); | |
1258 | ||
1259 | /* this can only be called after a successful initialization */ | |
1260 | static void cleanup_device_data(struct hfi1_devdata *dd) | |
1261 | { | |
1262 | int ctxt; | |
1263 | int pidx; | |
1264 | struct hfi1_ctxtdata **tmp; | |
1265 | unsigned long flags; | |
1266 | ||
1267 | /* users can't do anything more with chip */ | |
1268 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | |
1269 | struct hfi1_pportdata *ppd = &dd->pport[pidx]; | |
1270 | struct cc_state *cc_state; | |
1271 | int i; | |
1272 | ||
1273 | if (ppd->statusp) | |
1274 | *ppd->statusp &= ~HFI1_STATUS_CHIP_PRESENT; | |
1275 | ||
1276 | for (i = 0; i < OPA_MAX_SLS; i++) | |
1277 | hrtimer_cancel(&ppd->cca_timer[i].hrtimer); | |
1278 | ||
1279 | spin_lock(&ppd->cc_state_lock); | |
1280 | cc_state = get_cc_state(ppd); | |
1281 | rcu_assign_pointer(ppd->cc_state, NULL); | |
1282 | spin_unlock(&ppd->cc_state_lock); | |
1283 | ||
1284 | if (cc_state) | |
1285 | call_rcu(&cc_state->rcu, cc_state_reclaim); | |
1286 | } | |
1287 | ||
1288 | free_credit_return(dd); | |
1289 | ||
1290 | /* | |
1291 | * Free any resources still in use (usually just kernel contexts) | |
1292 | * at unload; we do so for all ctxtcnt contexts, because that's what we allocate. | |
1293 | * We acquire lock to be really paranoid that rcd isn't being | |
1294 | * accessed from some interrupt-related code (that should not happen, | |
1295 | * but best to be sure). | |
1296 | */ | |
1297 | spin_lock_irqsave(&dd->uctxt_lock, flags); | |
1298 | tmp = dd->rcd; | |
1299 | dd->rcd = NULL; | |
1300 | spin_unlock_irqrestore(&dd->uctxt_lock, flags); | |
46b010d3 MB |
1301 | |
1302 | if (dd->rcvhdrtail_dummy_kvaddr) { | |
1303 | dma_free_coherent(&dd->pcidev->dev, sizeof(u64), | |
1304 | (void *)dd->rcvhdrtail_dummy_kvaddr, | |
1305 | dd->rcvhdrtail_dummy_physaddr); | |
1306 | dd->rcvhdrtail_dummy_kvaddr = NULL; | |
1307 | } | |
1308 | ||
77241056 MM |
1309 | for (ctxt = 0; tmp && ctxt < dd->num_rcv_contexts; ctxt++) { |
1310 | struct hfi1_ctxtdata *rcd = tmp[ctxt]; | |
1311 | ||
1312 | tmp[ctxt] = NULL; /* debugging paranoia */ | |
1313 | if (rcd) { | |
1314 | hfi1_clear_tids(rcd); | |
1315 | hfi1_free_ctxtdata(dd, rcd); | |
1316 | } | |
1317 | } | |
1318 | kfree(tmp); | |
1319 | /* must follow rcv context free - need to remove rcv's hooks */ | |
1320 | for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++) | |
1321 | sc_free(dd->send_contexts[ctxt].sc); | |
1322 | dd->num_send_contexts = 0; | |
1323 | kfree(dd->send_contexts); | |
1324 | dd->send_contexts = NULL; | |
1325 | kfree(dd->boardname); | |
1326 | vfree(dd->events); | |
1327 | vfree(dd->status); | |
77241056 MM |
1328 | } |
1329 | ||
1330 | /* | |
1331 | * Clean up on unit shutdown, or error during unit load after | |
1332 | * successful initialization. | |
1333 | */ | |
1334 | static void postinit_cleanup(struct hfi1_devdata *dd) | |
1335 | { | |
1336 | hfi1_start_cleanup(dd); | |
1337 | ||
1338 | hfi1_pcie_ddcleanup(dd); | |
1339 | hfi1_pcie_cleanup(dd->pcidev); | |
1340 | ||
1341 | cleanup_device_data(dd); | |
1342 | ||
1343 | hfi1_free_devdata(dd); | |
1344 | } | |
1345 | ||
1346 | static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |
1347 | { | |
1348 | int ret = 0, j, pidx, initfail; | |
1349 | struct hfi1_devdata *dd = NULL; | |
e8597eb0 | 1350 | struct hfi1_pportdata *ppd; |
77241056 MM |
1351 | |
1352 | /* First, lock the non-writable module parameters */ | |
1353 | HFI1_CAP_LOCK(); | |
1354 | ||
1355 | /* Validate some global module parameters */ | |
1356 | if (rcvhdrcnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) { | |
1357 | hfi1_early_err(&pdev->dev, "Header queue count too small\n"); | |
1358 | ret = -EINVAL; | |
1359 | goto bail; | |
1360 | } | |
e002dcc0 SS |
1361 | if (rcvhdrcnt > HFI1_MAX_HDRQ_EGRBUF_CNT) { |
1362 | hfi1_early_err(&pdev->dev, | |
1363 | "Receive header queue count cannot be greater than %u\n", | |
1364 | HFI1_MAX_HDRQ_EGRBUF_CNT); | |
1365 | ret = -EINVAL; | |
1366 | goto bail; | |
1367 | } | |
77241056 MM |
1368 | /* use the encoding function as a sanitization check */ |
1369 | if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) { | |
1370 | hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n", | |
1371 | hfi1_hdrq_entsize); | |
07859def | 1372 | ret = -EINVAL; |
77241056 MM |
1373 | goto bail; |
1374 | } | |
1375 | ||
1376 | /* The receive eager buffer size must be set before the receive | |
1377 | * contexts are created. | |
1378 | * | |
1379 | * Set the eager buffer size. Validate that it falls in a range | |
1380 | * allowed by the hardware - all powers of 2 between the min and | |
1381 | * max. The maximum valid MTU is within the eager buffer range | |
1382 | * so we do not need to cap the max_mtu by an eager buffer size | |
1383 | * setting. | |
1384 | */ | |
1385 | if (eager_buffer_size) { | |
1386 | if (!is_power_of_2(eager_buffer_size)) | |
1387 | eager_buffer_size = | |
1388 | roundup_pow_of_two(eager_buffer_size); | |
1389 | eager_buffer_size = | |
1390 | clamp_val(eager_buffer_size, | |
1391 | MIN_EAGER_BUFFER * 8, | |
1392 | MAX_EAGER_BUFFER_TOTAL); | |
1393 | hfi1_early_info(&pdev->dev, "Eager buffer size %u\n", | |
1394 | eager_buffer_size); | |
1395 | } else { | |
1396 | hfi1_early_err(&pdev->dev, "Invalid Eager buffer size of 0\n"); | |
1397 | ret = -EINVAL; | |
1398 | goto bail; | |
1399 | } | |
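/*
 * Example: the default eager_buffer_size of 2MB is already a power
 * of two and is used as-is, assuming it lies inside the
 * MIN_EAGER_BUFFER * 8 .. MAX_EAGER_BUFFER_TOTAL window; a
 * hypothetical request of 3MB would first be rounded up to 4MB and
 * then clamped.
 */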
1400 | ||
1401 | /* restrict value of hfi1_rcvarr_split */ | |
1402 | hfi1_rcvarr_split = clamp_val(hfi1_rcvarr_split, 0, 100); | |
1403 | ||
1404 | ret = hfi1_pcie_init(pdev, ent); | |
1405 | if (ret) | |
1406 | goto bail; | |
1407 | ||
1408 | /* | |
1409 | * Do device-specific initialization, function table setup, dd | |
1410 | * allocation, etc. | |
1411 | */ | |
1412 | switch (ent->device) { | |
1413 | case PCI_DEVICE_ID_INTEL0: | |
1414 | case PCI_DEVICE_ID_INTEL1: | |
1415 | dd = hfi1_init_dd(pdev, ent); | |
1416 | break; | |
1417 | default: | |
1418 | hfi1_early_err(&pdev->dev, | |
1419 | "Failing on unknown Intel deviceid 0x%x\n", | |
1420 | ent->device); | |
1421 | ret = -ENODEV; | |
1422 | } | |
1423 | ||
1424 | if (IS_ERR(dd)) | |
1425 | ret = PTR_ERR(dd); | |
1426 | if (ret) | |
1427 | goto clean_bail; /* error already printed */ | |
1428 | ||
1429 | ret = create_workqueues(dd); | |
1430 | if (ret) | |
1431 | goto clean_bail; | |
1432 | ||
1433 | /* do the generic initialization */ | |
1434 | initfail = hfi1_init(dd, 0); | |
1435 | ||
1436 | ret = hfi1_register_ib_device(dd); | |
1437 | ||
1438 | /* | |
1439 | * Now ready for use. This should be cleared whenever we | |
1440 | * detect a reset, or initiate one. On an earlier failure, | |
1441 | * we still create devices, so diags, etc. can be used | |
1442 | * to determine cause of problem. | |
1443 | */ | |
1444 | if (!initfail && !ret) | |
1445 | dd->flags |= HFI1_INITTED; | |
1446 | ||
1447 | j = hfi1_device_create(dd); | |
1448 | if (j) | |
1449 | dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j); | |
1450 | ||
1451 | if (initfail || ret) { | |
1452 | stop_timers(dd); | |
1453 | flush_workqueue(ib_wq); | |
e8597eb0 | 1454 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { |
77241056 | 1455 | hfi1_quiet_serdes(dd->pport + pidx); |
e8597eb0 HC |
1456 | ppd = dd->pport + pidx; |
1457 | if (ppd->hfi1_wq) { | |
1458 | destroy_workqueue(ppd->hfi1_wq); | |
1459 | ppd->hfi1_wq = NULL; | |
1460 | } | |
1461 | } | |
77241056 MM |
1462 | if (!j) |
1463 | hfi1_device_remove(dd); | |
1464 | if (!ret) | |
1465 | hfi1_unregister_ib_device(dd); | |
1466 | postinit_cleanup(dd); | |
1467 | if (initfail) | |
1468 | ret = initfail; | |
1469 | goto bail; /* everything already cleaned */ | |
1470 | } | |
1471 | ||
1472 | sdma_start(dd); | |
1473 | ||
1474 | return 0; | |
1475 | ||
1476 | clean_bail: | |
1477 | hfi1_pcie_cleanup(pdev); | |
1478 | bail: | |
1479 | return ret; | |
1480 | } | |
1481 | ||
1482 | static void remove_one(struct pci_dev *pdev) | |
1483 | { | |
1484 | struct hfi1_devdata *dd = pci_get_drvdata(pdev); | |
1485 | ||
1486 | /* unregister from IB core */ | |
1487 | hfi1_unregister_ib_device(dd); | |
1488 | ||
1489 | /* | |
1490 | * Disable the IB link, disable interrupts on the device, | |
1491 | * clear dma engines, etc. | |
1492 | */ | |
1493 | shutdown_device(dd); | |
1494 | ||
1495 | stop_timers(dd); | |
1496 | ||
1497 | /* wait until all of our (qsfp) queue_work() calls complete */ | |
1498 | flush_workqueue(ib_wq); | |
1499 | ||
1500 | hfi1_device_remove(dd); | |
1501 | ||
1502 | postinit_cleanup(dd); | |
1503 | } | |
1504 | ||
1505 | /** | |
1506 | * hfi1_create_rcvhdrq - create a receive header queue | |
1507 | * @dd: the hfi1_ib device | |
1508 | * @rcd: the context data | |
1509 | * | |
1510 | * This must be contiguous memory (from an I/O perspective), and must be | |
1511 | * DMA'able (which means for some systems, it will go through an IOMMU, | |
1512 | * or be forced into a low address range). | |
1513 | */ | |
1514 | int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) | |
1515 | { | |
1516 | unsigned amt; | |
1517 | u64 reg; | |
1518 | ||
1519 | if (!rcd->rcvhdrq) { | |
1520 | dma_addr_t phys_hdrqtail; | |
1521 | gfp_t gfp_flags; | |
1522 | ||
1523 | /* | |
1524 | * rcvhdrqentsize is in DWs, so we have to convert to bytes | |
1525 | * (* sizeof(u32)). | |
1526 | */ | |
1527 | amt = ALIGN(rcd->rcvhdrq_cnt * rcd->rcvhdrqentsize * | |
1528 | sizeof(u32), PAGE_SIZE); | |
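/*
 * Example with the module parameter defaults: rcvhdrq_cnt = 2048
 * and rcvhdrqentsize = 32 DWs give amt = 2048 * 32 * 4 = 256 KiB,
 * which is already page-aligned.
 */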
1529 | ||
1530 | gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ? | |
1531 | GFP_USER : GFP_KERNEL; | |
1532 | rcd->rcvhdrq = dma_zalloc_coherent( | |
1533 | &dd->pcidev->dev, amt, &rcd->rcvhdrq_phys, | |
1534 | gfp_flags | __GFP_COMP); | |
1535 | ||
1536 | if (!rcd->rcvhdrq) { | |
1537 | dd_dev_err(dd, | |
1538 | "attempt to allocate %u bytes for ctxt %u rcvhdrq failed\n", | |
1539 | amt, rcd->ctxt); | |
1540 | goto bail; | |
1541 | } | |
1542 | ||
77241056 MM |
1543 | if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) { |
1544 | rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent( | |
1545 | &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail, | |
1546 | gfp_flags); | |
1547 | if (!rcd->rcvhdrtail_kvaddr) | |
1548 | goto bail_free; | |
1549 | rcd->rcvhdrqtailaddr_phys = phys_hdrqtail; | |
1550 | } | |
1551 | ||
1552 | rcd->rcvhdrq_size = amt; | |
1553 | } | |
1554 | /* | |
1555 | * These values are per-context: | |
1556 | * RcvHdrCnt | |
1557 | * RcvHdrEntSize | |
1558 | * RcvHdrSize | |
1559 | */ | |
1560 | reg = ((u64)(rcd->rcvhdrq_cnt >> HDRQ_SIZE_SHIFT) | |
1561 | & RCV_HDR_CNT_CNT_MASK) | |
1562 | << RCV_HDR_CNT_CNT_SHIFT; | |
1563 | write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_CNT, reg); | |
1564 | reg = (encode_rcv_header_entry_size(rcd->rcvhdrqentsize) | |
1565 | & RCV_HDR_ENT_SIZE_ENT_SIZE_MASK) | |
1566 | << RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT; | |
1567 | write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_ENT_SIZE, reg); | |
1568 | reg = (dd->rcvhdrsize & RCV_HDR_SIZE_HDR_SIZE_MASK) | |
1569 | << RCV_HDR_SIZE_HDR_SIZE_SHIFT; | |
1570 | write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_SIZE, reg); | |
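	/*
	 * Each CSR field above is packed as (value & field_mask) <<
	 * field_shift. As an illustration only (the actual value of
	 * HDRQ_SIZE_SHIFT is not assumed here): with a shift of 5, a
	 * 2048-entry header queue would be programmed as 2048 >> 5 = 64
	 * hardware units.
	 */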
46b010d3 MB | 1571 | ||
1572 | /* | |
1573 | * Program dummy tail address for every receive context | |
1574 | * before enabling any receive context | |
1575 | */ | |
1576 | write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_TAIL_ADDR, | |
1577 | dd->rcvhdrtail_dummy_physaddr); | |
1578 | ||
77241056 MM | 1579 | return 0; | |
1580 | ||
1581 | bail_free: | |
1582 | dd_dev_err(dd, | |
1583 | "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n", | |
1584 | rcd->ctxt); | |
1585 | vfree(rcd->user_event_mask); | |
1586 | rcd->user_event_mask = NULL; | |
1587 | dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq, | |
1588 | rcd->rcvhdrq_phys); | |
1589 | rcd->rcvhdrq = NULL; | |
1590 | bail: | |
1591 | return -ENOMEM; | |
1592 | } | |
1593 | ||
1594 | /** | |
1595 | * hfi1_setup_eagerbufs - allocate eager buffers, both kernel and user contexts. | |
1596 | * @rcd: the context we are setting up. | |
1597 | * | |
1598 | * Allocate the eager TID buffers and program them into the chip. | |
1599 | * They are no longer completely contiguous; we do multiple allocation | |
1600 | * calls. Otherwise we get the OOM code involved by asking for too | |
1601 | * much per call, with disastrous results on some kernels. | |
1602 | */ | |
1603 | int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd) | |
1604 | { | |
1605 | struct hfi1_devdata *dd = rcd->dd; | |
1606 | u32 max_entries, egrtop, alloced_bytes = 0, idx = 0; | |
1607 | gfp_t gfp_flags; | |
1608 | u16 order; | |
1609 | int ret = 0; | |
1610 | u16 round_mtu = roundup_pow_of_two(hfi1_max_mtu); | |
1611 | ||
1612 | /* | |
1613 | * GFP_USER, but without GFP_FS, so the buffer cache can be | |
1614 | * coalesced (we hope); otherwise, even at order 4, heavy | |
1615 | * filesystem activity makes these allocations fail. We also | |
1616 | * use compound pages (__GFP_COMP). | |
1617 | */ | |
71baba4b | 1618 | gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP; |
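	/*
	 * Note: __GFP_RECLAIM | __GFP_IO is the definition of GFP_NOFS, so
	 * this is effectively GFP_NOFS | __GFP_COMP, i.e. GFP_KERNEL minus
	 * __GFP_FS, with compound pages.
	 */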
77241056 MM | 1619 | ||
1620 | /* | |
1621 | * The minimum size of the eager buffers is a group of MTU-sized | |
1622 | * buffers. | |
1623 | * The global eager_buffer_size parameter is checked elsewhere | |
1624 | * against its theoretical lower limit. Here, we check against the | |
1625 | * MTU. | |
1626 | */ | |
1627 | if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size)) | |
1628 | rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size; | |
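	/*
	 * Worked example with hypothetical values: for a 10240-byte max MTU,
	 * round_mtu = roundup_pow_of_two(10240) = 16384; with a group_size
	 * of 8 entries, the floor enforced here is 16384 * 8 = 128 KiB.
	 */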
1629 | /* | |
1630 | * If using one-pkt-per-egr-buffer, lower the eager buffer | |
1631 | * size to the max MTU (page-aligned). | |
1632 | */ | |
1633 | if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) | |
1634 | rcd->egrbufs.rcvtid_size = round_mtu; | |
1635 | ||
1636 | /* | |
1637 | * Eager buffers sizes of 1MB or less require smaller TID sizes | |
1638 | * to satisfy the "multiple of 8 RcvArray entries" requirement. | |
1639 | */ | |
1640 | if (rcd->egrbufs.size <= (1 << 20)) | |
1641 | rcd->egrbufs.rcvtid_size = max((unsigned long)round_mtu, | |
1642 | rounddown_pow_of_two(rcd->egrbufs.size / 8)); | |
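	/*
	 * Worked example: for a hypothetical 512 KiB eager buffer size,
	 * rounddown_pow_of_two(524288 / 8) = 65536, so rcvtid_size becomes
	 * 64 KiB (unless round_mtu is larger).
	 */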
1643 | ||
1644 | while (alloced_bytes < rcd->egrbufs.size && | |
1645 | rcd->egrbufs.alloced < rcd->egrbufs.count) { | |
1646 | rcd->egrbufs.buffers[idx].addr = | |
1647 | dma_zalloc_coherent(&dd->pcidev->dev, | |
1648 | rcd->egrbufs.rcvtid_size, | |
1649 | &rcd->egrbufs.buffers[idx].phys, | |
1650 | gfp_flags); | |
1651 | if (rcd->egrbufs.buffers[idx].addr) { | |
1652 | rcd->egrbufs.buffers[idx].len = | |
1653 | rcd->egrbufs.rcvtid_size; | |
1654 | rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].addr = | |
1655 | rcd->egrbufs.buffers[idx].addr; | |
1656 | rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].phys = | |
1657 | rcd->egrbufs.buffers[idx].phys; | |
1658 | rcd->egrbufs.alloced++; | |
1659 | alloced_bytes += rcd->egrbufs.rcvtid_size; | |
1660 | idx++; | |
1661 | } else { | |
1662 | u32 new_size, i, j; | |
1663 | u64 offset = 0; | |
1664 | ||
1665 | /* | |
1666 | * Fail the eager buffer allocation if: | |
1667 | * - we are already using the lowest acceptable size | |
1668 | * - we are using one-pkt-per-egr-buffer (this implies | |
1669 | * that we are accepting only one size) | |
1670 | */ | |
1671 | if (rcd->egrbufs.rcvtid_size == round_mtu || | |
1672 | !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) { | |
1673 | dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n", | |
1674 | rcd->ctxt); | |
1675 | goto bail_rcvegrbuf_phys; | |
1676 | } | |
1677 | ||
1678 | new_size = rcd->egrbufs.rcvtid_size / 2; | |
1679 | ||
1680 | /* | |
1681 | * If the first attempt to allocate memory failed, don't fail | |
1682 | * everything; instead, retry with the next lower buffer | |
1683 | * size. | |
1684 | */ | |
1685 | if (idx == 0) { | |
1686 | rcd->egrbufs.rcvtid_size = new_size; | |
1687 | continue; | |
1688 | } | |
1689 | ||
1690 | /* | |
1691 | * Re-partition already allocated buffers to a smaller | |
1692 | * size. | |
1693 | */ | |
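			/*
			 * Worked example with hypothetical values: if two
			 * 64 KiB buffers were allocated and new_size is
			 * 32 KiB, the loop below yields four rcvtids, two per
			 * buffer, at offsets 0 and 32 KiB within each buffer.
			 */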
1694 | rcd->egrbufs.alloced = 0; | |
1695 | for (i = 0, j = 0, offset = 0; j < idx; i++) { | |
1696 | if (i >= rcd->egrbufs.count) | |
1697 | break; | |
1698 | rcd->egrbufs.rcvtids[i].phys = | |
1699 | rcd->egrbufs.buffers[j].phys + offset; | |
1700 | rcd->egrbufs.rcvtids[i].addr = | |
1701 | rcd->egrbufs.buffers[j].addr + offset; | |
1702 | rcd->egrbufs.alloced++; | |
1703 | if ((rcd->egrbufs.buffers[j].phys + offset + | |
1704 | new_size) == | |
1705 | (rcd->egrbufs.buffers[j].phys + | |
1706 | rcd->egrbufs.buffers[j].len)) { | |
1707 | j++; | |
1708 | offset = 0; | |
1709 | } else | |
1710 | offset += new_size; | |
1711 | } | |
1712 | rcd->egrbufs.rcvtid_size = new_size; | |
1713 | } | |
1714 | } | |
1715 | rcd->egrbufs.numbufs = idx; | |
1716 | rcd->egrbufs.size = alloced_bytes; | |
1717 | ||
6c63e423 SS | 1718 | hfi1_cdbg(PROC, | |
1719 | "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %zuKB\n", | |
1720 | rcd->ctxt, rcd->egrbufs.alloced, rcd->egrbufs.rcvtid_size, | |
1721 | rcd->egrbufs.size); | |
1722 | ||
77241056 MM | 1723 | ||
1724 | /* | |
1725 | * Set the context's rcv array head update threshold to the closest | |
1726 | * power of 2 (so we can use a mask instead of modulo) below half | |
1727 | * the allocated entries. | |
1728 | */ | |
1729 | rcd->egrbufs.threshold = | |
1730 | rounddown_pow_of_two(rcd->egrbufs.alloced / 2); | |
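	/*
	 * Worked example: with 100 allocated entries, half is 50 and
	 * rounddown_pow_of_two(50) = 32, so the update check can use a
	 * mask of 31 rather than a modulo.
	 */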
1731 | /* | |
1732 | * Compute the expected RcvArray entry base. This is done after | |
1733 | * allocating the eager buffers in order to maximize the | |
1734 | * expected RcvArray entries for the context. | |
1735 | */ | |
1736 | max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size; | |
1737 | egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size); | |
1738 | rcd->expected_count = max_entries - egrtop; | |
1739 | if (rcd->expected_count > MAX_TID_PAIR_ENTRIES * 2) | |
1740 | rcd->expected_count = MAX_TID_PAIR_ENTRIES * 2; | |
1741 | ||
1742 | rcd->expected_base = rcd->eager_base + egrtop; | |
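	/*
	 * Worked example with hypothetical values: 256 groups of 8 entries
	 * give max_entries = 2048; with 1500 eager buffers allocated,
	 * egrtop = roundup(1500, 8) = 1504, so expected_count = 544
	 * (subject to the MAX_TID_PAIR_ENTRIES cap above) and expected_base
	 * starts 1504 entries above eager_base.
	 */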
6c63e423 SS | 1743 | hfi1_cdbg(PROC, "ctxt%u: eager:%u, exp:%u, egrbase:%u, expbase:%u\n", | |
1744 | rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count, | |
1745 | rcd->eager_base, rcd->expected_base); | |
77241056 MM | 1746 | ||
1747 | if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) { | |
6c63e423 SS | 1748 | hfi1_cdbg(PROC, | |
1749 | "ctxt%u: current eager buffer size %u is invalid\n", | |
1750 | rcd->ctxt, rcd->egrbufs.rcvtid_size); | |
77241056 MM | 1751 | ret = -EINVAL; | |
1752 | goto bail; | |
1753 | } | |
1754 | ||
1755 | for (idx = 0; idx < rcd->egrbufs.alloced; idx++) { | |
1756 | hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER, | |
1757 | rcd->egrbufs.rcvtids[idx].phys, order); | |
1758 | cond_resched(); | |
1759 | } | |
1760 | goto bail; | |
1761 | ||
1762 | bail_rcvegrbuf_phys: | |
1763 | for (idx = 0; idx < rcd->egrbufs.alloced && | |
1764 | rcd->egrbufs.buffers[idx].addr; | |
1765 | idx++) { | |
1766 | dma_free_coherent(&dd->pcidev->dev, | |
1767 | rcd->egrbufs.buffers[idx].len, | |
1768 | rcd->egrbufs.buffers[idx].addr, | |
1769 | rcd->egrbufs.buffers[idx].phys); | |
1770 | rcd->egrbufs.buffers[idx].addr = NULL; | |
1771 | rcd->egrbufs.buffers[idx].phys = 0; | |
1772 | rcd->egrbufs.buffers[idx].len = 0; | |
1773 | } | |
1774 | bail: | |
1775 | return ret; | |
1776 | } |