/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

/* local headers; hfi.h supplies the hfi1_* device structures used below */
#include "hfi.h"
#include "pio.h"
#include "device.h"
#include "common.h"
#include "trace.h"
#include "user_sdma.h"
#include "user_exp_rcv.h"
#include "aspm.h"
#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

#define SEND_CTXT_HALT_TIMEOUT 1000 /* msecs */
/*
 * File operation functions
 */
static int hfi1_file_open(struct inode *, struct file *);
static int hfi1_file_close(struct inode *, struct file *);
static ssize_t hfi1_write_iter(struct kiocb *, struct iov_iter *);
static unsigned int hfi1_poll(struct file *, struct poll_table_struct *);
static int hfi1_file_mmap(struct file *, struct vm_area_struct *);

static u64 kvirt_to_phys(void *);
static int assign_ctxt(struct file *, struct hfi1_user_info *);
static int init_subctxts(struct hfi1_ctxtdata *, const struct hfi1_user_info *);
static int user_init(struct file *);
static int get_ctxt_info(struct file *, void __user *, __u32);
static int get_base_info(struct file *, void __user *, __u32);
static int setup_ctxt(struct file *);
static int setup_subctxt(struct hfi1_ctxtdata *);
static int get_user_context(struct file *, struct hfi1_user_info *, int);
static int find_shared_ctxt(struct file *, const struct hfi1_user_info *);
static int allocate_ctxt(struct file *, struct hfi1_devdata *,
			 struct hfi1_user_info *);
static unsigned int poll_urgent(struct file *, struct poll_table_struct *);
static unsigned int poll_next(struct file *, struct poll_table_struct *);
static int user_event_ack(struct hfi1_ctxtdata *, int, unsigned long);
static int set_ctxt_pkey(struct hfi1_ctxtdata *, unsigned, u16);
static int manage_rcvq(struct hfi1_ctxtdata *, unsigned, int);
static int vma_fault(struct vm_area_struct *, struct vm_fault *);
static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
			    unsigned long arg);
static const struct file_operations hfi1_file_ops = {
	.owner = THIS_MODULE,
	.write_iter = hfi1_write_iter,
	.open = hfi1_file_open,
	.release = hfi1_file_close,
	.unlocked_ioctl = hfi1_file_ioctl,
	.poll = hfi1_poll,
	.mmap = hfi1_file_mmap,
	.llseek = noop_llseek,
};
static struct vm_operations_struct vm_ops = {
	.fault = vma_fault,
};
/*
 * Types of memories mapped into user processes' space
 */
enum mmap_types {
	PIO_BUFS = 1,
	PIO_BUFS_SOP,
	PIO_CRED,
	RCV_HDRQ,
	RCV_EGRBUF,
	UREGS,
	EVENTS,
	STATUS,
	RTAIL,
	SUBCTXT_UREGS,
	SUBCTXT_RCV_HDRQ,
	SUBCTXT_EGRBUF,
	SDMA_COMP
};
/*
 * Masks and offsets defining the mmap tokens
 */
#define HFI1_MMAP_OFFSET_MASK   0xfffULL
#define HFI1_MMAP_OFFSET_SHIFT  0
#define HFI1_MMAP_SUBCTXT_MASK  0xfULL
#define HFI1_MMAP_SUBCTXT_SHIFT 12
#define HFI1_MMAP_CTXT_MASK     0xffULL
#define HFI1_MMAP_CTXT_SHIFT    16
#define HFI1_MMAP_TYPE_MASK     0xfULL
#define HFI1_MMAP_TYPE_SHIFT    24
#define HFI1_MMAP_MAGIC_MASK    0xffffffffULL
#define HFI1_MMAP_MAGIC_SHIFT   32

#define HFI1_MMAP_MAGIC         0xdabbad00
#define HFI1_MMAP_TOKEN_SET(field, val)	\
	(((val) & HFI1_MMAP_##field##_MASK) << HFI1_MMAP_##field##_SHIFT)
#define HFI1_MMAP_TOKEN_GET(field, token) \
	(((token) >> HFI1_MMAP_##field##_SHIFT) & HFI1_MMAP_##field##_MASK)
#define HFI1_MMAP_TOKEN(type, ctxt, subctxt, addr)   \
	(HFI1_MMAP_TOKEN_SET(MAGIC, HFI1_MMAP_MAGIC) | \
	HFI1_MMAP_TOKEN_SET(TYPE, type) | \
	HFI1_MMAP_TOKEN_SET(CTXT, ctxt) | \
	HFI1_MMAP_TOKEN_SET(SUBCTXT, subctxt) | \
	HFI1_MMAP_TOKEN_SET(OFFSET, (offset_in_page(addr))))
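
/*
 * Worked example of the token layout (illustrative only, values
 * assumed): a token for type PIO_BUFS in context 3, sub-context 1,
 * at an address whose page offset is 0x80 is packed as
 *
 *	bits 63..32	MAGIC	0xdabbad00
 *	bits 27..24	TYPE	PIO_BUFS
 *	bits 23..16	CTXT	0x03
 *	bits 15..12	SUBCTXT	0x1
 *	bits 11..0	OFFSET	0x080
 *
 * so HFI1_MMAP_TOKEN_GET(CTXT, token) recovers 3. User space passes
 * the token back verbatim as the mmap() offset, which is how
 * hfi1_file_mmap() below decides what to map.
 */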
#define dbg(fmt, ...)				\
	pr_info(fmt, ##__VA_ARGS__)
static inline int is_valid_mmap(u64 token)
{
	return (HFI1_MMAP_TOKEN_GET(MAGIC, token) == HFI1_MMAP_MAGIC);
}
static int hfi1_file_open(struct inode *inode, struct file *fp)
{
	struct hfi1_filedata *fd;
	struct hfi1_devdata *dd = container_of(inode->i_cdev,
					       struct hfi1_devdata,
					       user_cdev);

	/* Just take a ref now. Not all opens result in a context assign */
	kobject_get(&dd->kobj);

	/* The real work is performed later in assign_ctxt() */
	fd = kzalloc(sizeof(*fd), GFP_KERNEL);
	if (fd) {
		fd->rec_cpu_num = -1; /* no cpu affinity by default */
		fd->mm = current->mm;
		atomic_inc(&fd->mm->mm_count);
	}

	fp->private_data = fd;

	return fd ? 0 : -ENOMEM;
}
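
/*
 * Note on the mm reference above (editorial, not from the original
 * source): bumping mm_count keeps the mm_struct itself alive (mmgrab
 * semantics) for the later page-pinning paths; the matching drop
 * happens in hfi1_file_close().
 */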
static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
			    unsigned long arg)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_user_info uinfo;
	struct hfi1_tid_info tinfo;
	int ret = 0;
	unsigned long addr;
	int uval = 0;
	unsigned long ul_uval = 0;
	u16 uval16 = 0;

	hfi1_cdbg(IOCTL, "IOCTL recv: 0x%x", cmd);
	if (cmd != HFI1_IOCTL_ASSIGN_CTXT &&
	    cmd != HFI1_IOCTL_GET_VERS &&
	    !uctxt)
		return -EINVAL;

	switch (cmd) {
	case HFI1_IOCTL_ASSIGN_CTXT:
		if (uctxt)
			return -EINVAL;

		if (copy_from_user(&uinfo,
				   (struct hfi1_user_info __user *)arg,
				   sizeof(uinfo)))
			return -EFAULT;

		ret = assign_ctxt(fp, &uinfo);
		if (ret < 0)
			return ret;
		ret = setup_ctxt(fp);
		if (ret)
			return ret;
		ret = user_init(fp);
		break;
	case HFI1_IOCTL_CTXT_INFO:
		ret = get_ctxt_info(fp, (void __user *)(unsigned long)arg,
				    sizeof(struct hfi1_ctxt_info));
		break;
	case HFI1_IOCTL_USER_INFO:
		ret = get_base_info(fp, (void __user *)(unsigned long)arg,
				    sizeof(struct hfi1_base_info));
		break;
	case HFI1_IOCTL_CREDIT_UPD:
		if (uctxt && uctxt->sc)
			sc_return_credits(uctxt->sc);
		break;
	case HFI1_IOCTL_TID_UPDATE:
		if (copy_from_user(&tinfo,
				   (struct hfi1_tid_info __user *)arg,
				   sizeof(tinfo)))
			return -EFAULT;

		ret = hfi1_user_exp_rcv_setup(fp, &tinfo);
		if (!ret) {
			/*
			 * Copy the number of tidlist entries we used
			 * and the length of the buffer we registered.
			 * These fields are adjacent in the structure so
			 * we can copy them at the same time.
			 */
			addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
			if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
					 sizeof(tinfo.tidcnt) +
					 sizeof(tinfo.length)))
				ret = -EFAULT;
		}
		break;
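
		/*
		 * The two-field copy above leans on tidcnt and length
		 * being laid out back to back in struct hfi1_tid_info.
		 * A sketch of a compile-time guard for that assumption
		 * (not in the original driver) could look like:
		 *
		 *	BUILD_BUG_ON(offsetof(struct hfi1_tid_info, length) !=
		 *		     offsetof(struct hfi1_tid_info, tidcnt) +
		 *		     sizeof(((struct hfi1_tid_info *)0)->tidcnt));
		 */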
	case HFI1_IOCTL_TID_FREE:
		if (copy_from_user(&tinfo,
				   (struct hfi1_tid_info __user *)arg,
				   sizeof(tinfo)))
			return -EFAULT;

		ret = hfi1_user_exp_rcv_clear(fp, &tinfo);
		if (ret)
			break;
		addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
		if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
				 sizeof(tinfo.tidcnt)))
			ret = -EFAULT;
		break;
	case HFI1_IOCTL_TID_INVAL_READ:
		if (copy_from_user(&tinfo,
				   (struct hfi1_tid_info __user *)arg,
				   sizeof(tinfo)))
			return -EFAULT;

		ret = hfi1_user_exp_rcv_invalid(fp, &tinfo);
		if (ret)
			break;
		addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
		if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
				 sizeof(tinfo.tidcnt)))
			ret = -EFAULT;
		break;
	case HFI1_IOCTL_RECV_CTRL:
		ret = get_user(uval, (int __user *)arg);
		if (ret != 0)
			return -EFAULT;
		ret = manage_rcvq(uctxt, fd->subctxt, uval);
		break;
	case HFI1_IOCTL_POLL_TYPE:
		ret = get_user(uval, (int __user *)arg);
		if (ret != 0)
			return -EFAULT;
		uctxt->poll_type = (typeof(uctxt->poll_type))uval;
		break;
	case HFI1_IOCTL_ACK_EVENT:
		ret = get_user(ul_uval, (unsigned long __user *)arg);
		if (ret != 0)
			return -EFAULT;
		ret = user_event_ack(uctxt, fd->subctxt, ul_uval);
		break;
	case HFI1_IOCTL_SET_PKEY:
		ret = get_user(uval16, (u16 __user *)arg);
		if (ret != 0)
			return -EFAULT;
		if (HFI1_CAP_IS_USET(PKEY_CHECK))
			ret = set_ctxt_pkey(uctxt, fd->subctxt, uval16);
		else
			return -EPERM;
		break;
	case HFI1_IOCTL_CTXT_RESET: {
		struct send_context *sc;
		struct hfi1_devdata *dd;

		if (!uctxt || !uctxt->dd || !uctxt->sc)
			return -EINVAL;

		/*
		 * There is no protection here. User level has to
		 * guarantee that no one will be writing to the send
		 * context while it is being re-initialized.
		 * If user level breaks that guarantee, it will break
		 * its own context and no one else's.
		 */
		dd = uctxt->dd;
		sc = uctxt->sc;

		/*
		 * Wait until the interrupt handler has marked the
		 * context as halted or frozen. Report error if we time
		 * out.
		 */
		wait_event_interruptible_timeout(
			sc->halt_wait, (sc->flags & SCF_HALTED),
			msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
		if (!(sc->flags & SCF_HALTED))
			return -ENOLCK;

		/*
		 * If the send context was halted due to a Freeze,
		 * wait until the device has been "unfrozen" before
		 * resetting the context.
		 */
		if (sc->flags & SCF_FROZEN) {
			wait_event_interruptible_timeout(
				dd->event_queue,
				!(ACCESS_ONCE(dd->flags) & HFI1_FROZEN),
				msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
			if (dd->flags & HFI1_FROZEN)
				return -ENOLCK;

			if (dd->flags & HFI1_FORCED_FREEZE)
				/*
				 * Don't allow context reset if we are into
				 * forced freeze
				 */
				return -ENODEV;

			sc_disable(sc);
			ret = sc_enable(sc);
			hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB,
				     uctxt->ctxt);
		} else {
			ret = sc_restart(sc);
		}
		if (!ret)
			sc_return_credits(sc);
		break;
	}
	case HFI1_IOCTL_GET_VERS:
		uval = HFI1_USER_SWVERSION;
		if (put_user(uval, (int __user *)arg))
			return -EFAULT;
		break;

	default:
		return -EINVAL;
	}

	return ret;
}
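
/*
 * Sketch of the user-space side of this ioctl interface (illustrative
 * only, not part of the driver; the device node name is assumed):
 *
 *	int fd = open("/dev/hfi1_0", O_RDWR);
 *	int vers;
 *	struct hfi1_user_info uinfo = { ... };
 *
 *	ioctl(fd, HFI1_IOCTL_GET_VERS, &vers);
 *	ioctl(fd, HFI1_IOCTL_ASSIGN_CTXT, &uinfo);
 *
 * Every other command requires the context assignment to have
 * succeeded first, which is what the !uctxt guard at the top of
 * hfi1_file_ioctl() enforces.
 */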
static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
{
	struct hfi1_filedata *fd = kiocb->ki_filp->private_data;
	struct hfi1_user_sdma_pkt_q *pq = fd->pq;
	struct hfi1_user_sdma_comp_q *cq = fd->cq;
	int done = 0, reqs = 0;
	unsigned long dim = from->nr_segs;

	if (!cq || !pq)
		return -EIO;

	if (!iter_is_iovec(from) || !dim)
		return -EINVAL;

	hfi1_cdbg(SDMA, "SDMA request from %u:%u (%lu)",
		  fd->uctxt->ctxt, fd->subctxt, dim);

	if (atomic_read(&pq->n_reqs) == pq->n_max_reqs)
		return -ENOSPC;

	while (dim) {
		int ret;
		unsigned long count = 0;

		ret = hfi1_user_sdma_process_request(
			kiocb->ki_filp, (struct iovec *)(from->iov + done),
			dim, &count);
		if (ret) {
			reqs = ret;
			break;
		}
		dim -= count;
		done += count;
		reqs++;
	}

	return reqs;
}
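
/*
 * Usage note (editorial, not from the original source): user space
 * drives this entry point with writev(2) on the device fd. Each call
 * to hfi1_user_sdma_process_request() above consumes `count` iovec
 * segments describing one SDMA request, and the function returns the
 * number of requests queued rather than a byte count.
 */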
static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd;
	unsigned long flags, pfn;
	u64 token = vma->vm_pgoff << PAGE_SHIFT,
		memaddr = 0;
	u8 subctxt, mapio = 0, vmf = 0, type;
	ssize_t memlen = 0;
	int ret = 0;
	u16 ctxt;

	if (!is_valid_mmap(token) || !uctxt ||
	    !(vma->vm_flags & VM_SHARED)) {
		ret = -EINVAL;
		goto done;
	}
	dd = uctxt->dd;
	ctxt = HFI1_MMAP_TOKEN_GET(CTXT, token);
	subctxt = HFI1_MMAP_TOKEN_GET(SUBCTXT, token);
	type = HFI1_MMAP_TOKEN_GET(TYPE, token);
	if (ctxt != uctxt->ctxt || subctxt != fd->subctxt) {
		ret = -EINVAL;
		goto done;
	}

	flags = vma->vm_flags;

	switch (type) {
	case PIO_BUFS:
	case PIO_BUFS_SOP:
		memaddr = ((dd->physaddr + TXE_PIO_SEND) +
				/* chip pio base */
			   (uctxt->sc->hw_context * BIT(16))) +
				/* 64K PIO space / ctxt */
			(type == PIO_BUFS_SOP ?
				(TXE_PIO_SIZE / 2) : 0); /* sop? */
		/*
		 * Map only the amount allocated to the context, not the
		 * entire available context's PIO space.
		 */
		memlen = PAGE_ALIGN(uctxt->sc->credits * PIO_BLOCK_SIZE);
		flags &= ~VM_MAYREAD;
		flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		mapio = 1;
		break;
	case PIO_CRED:
		if (flags & VM_WRITE) {
			ret = -EPERM;
			goto done;
		}
		/*
		 * The credit return location for this context could be on the
		 * second or third page allocated for credit returns (if number
		 * of enabled contexts > 64 and 128 respectively).
		 */
		memaddr = dd->cr_base[uctxt->numa_id].pa +
			(((u64)uctxt->sc->hw_free -
			  (u64)dd->cr_base[uctxt->numa_id].va) & PAGE_MASK);
		memlen = PAGE_SIZE;
		flags &= ~VM_MAYWRITE;
		flags |= VM_DONTCOPY | VM_DONTEXPAND;
		/*
		 * The driver has already allocated memory for credit
		 * returns and programmed it into the chip. Has that
		 * memory been flagged as non-cached?
		 */
		/* vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); */
		mapio = 1;
		break;
	case RCV_HDRQ:
		memaddr = uctxt->rcvhdrq_phys;
		memlen = uctxt->rcvhdrq_size;
		break;
	case RCV_EGRBUF: {
		unsigned long addr;
		int i;

		/*
		 * The RcvEgr buffer needs to be handled differently
		 * as multiple non-contiguous pages need to be mapped
		 * into the user process.
		 */
		memlen = uctxt->egrbufs.size;
		if ((vma->vm_end - vma->vm_start) != memlen) {
			dd_dev_err(dd, "Eager buffer map size invalid (%lu != %lu)\n",
				   (vma->vm_end - vma->vm_start), memlen);
			ret = -EINVAL;
			goto done;
		}
		if (vma->vm_flags & VM_WRITE) {
			ret = -EPERM;
			goto done;
		}
		vma->vm_flags &= ~VM_MAYWRITE;
		addr = vma->vm_start;
		for (i = 0 ; i < uctxt->egrbufs.numbufs; i++) {
			ret = remap_pfn_range(
				vma, addr,
				uctxt->egrbufs.buffers[i].phys >> PAGE_SHIFT,
				uctxt->egrbufs.buffers[i].len,
				vma->vm_page_prot);
			if (ret < 0)
				goto done;
			addr += uctxt->egrbufs.buffers[i].len;
		}
		ret = 0;
		goto done;
	}
	case UREGS:
		/*
		 * Map only the page that contains this context's user
		 * registers.
		 */
		memaddr = (unsigned long)
			(dd->physaddr + RXE_PER_CONTEXT_USER)
			+ (uctxt->ctxt * RXE_PER_CONTEXT_SIZE);
		/*
		 * TidFlow table is on the same page as the rest of the
		 * user registers.
		 */
		memlen = PAGE_SIZE;
		flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		mapio = 1;
		break;
	case EVENTS:
		/*
		 * Use the page where this context's flags are. User level
		 * knows where its own bitmap is within the page.
		 */
		memaddr = (unsigned long)(dd->events +
					  ((uctxt->ctxt - dd->first_user_ctxt) *
					   HFI1_MAX_SHARED_CTXTS)) & PAGE_MASK;
		memlen = PAGE_SIZE;
		/*
		 * v3.7 removes VM_RESERVED but the effect is kept by
		 * using VM_IO.
		 */
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	case STATUS:
		memaddr = kvirt_to_phys((void *)dd->status);
		memlen = PAGE_SIZE;
		flags |= VM_IO | VM_DONTEXPAND;
		break;
	case RTAIL:
		if (!HFI1_CAP_IS_USET(DMA_RTAIL)) {
			/*
			 * If the memory allocation failed, the context alloc
			 * also would have failed, so we would never get here
			 */
			ret = -EINVAL;
			goto done;
		}
		if (flags & VM_WRITE) {
			ret = -EPERM;
			goto done;
		}
		memaddr = uctxt->rcvhdrqtailaddr_phys;
		memlen = PAGE_SIZE;
		flags &= ~VM_MAYWRITE;
		break;
	case SUBCTXT_UREGS:
		memaddr = (u64)uctxt->subctxt_uregbase;
		memlen = PAGE_SIZE;
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	case SUBCTXT_RCV_HDRQ:
		memaddr = (u64)uctxt->subctxt_rcvhdr_base;
		memlen = uctxt->rcvhdrq_size * uctxt->subctxt_cnt;
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	case SUBCTXT_EGRBUF:
		memaddr = (u64)uctxt->subctxt_rcvegrbuf;
		memlen = uctxt->egrbufs.size * uctxt->subctxt_cnt;
		flags |= VM_IO | VM_DONTEXPAND;
		flags &= ~VM_MAYWRITE;
		vmf = 1;
		break;
	case SDMA_COMP: {
		struct hfi1_user_sdma_comp_q *cq = fd->cq;

		if (!cq) {
			ret = -EFAULT;
			goto done;
		}
		memaddr = (u64)cq->comps;
		memlen = PAGE_ALIGN(sizeof(*cq->comps) * cq->nentries);
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	}
	default:
		ret = -EINVAL;
		break;
	}

	if ((vma->vm_end - vma->vm_start) != memlen) {
		hfi1_cdbg(PROC, "%u:%u Memory size mismatch %lu:%lu",
			  uctxt->ctxt, fd->subctxt,
			  (vma->vm_end - vma->vm_start), memlen);
		ret = -EINVAL;
		goto done;
	}

	vma->vm_flags = flags;
	hfi1_cdbg(PROC,
		  "%u:%u type:%u io/vf:%d/%d, addr:0x%llx, len:%lu(%lu), flags:0x%lx\n",
		  ctxt, subctxt, type, mapio, vmf, memaddr, memlen,
		  vma->vm_end - vma->vm_start, vma->vm_flags);
	pfn = (unsigned long)(memaddr >> PAGE_SHIFT);
	if (vmf) {
		vma->vm_pgoff = pfn;
		vma->vm_ops = &vm_ops;
		ret = 0;
	} else if (mapio) {
		ret = io_remap_pfn_range(vma, vma->vm_start, pfn, memlen,
					 vma->vm_page_prot);
	} else {
		ret = remap_pfn_range(vma, vma->vm_start, pfn, memlen,
				      vma->vm_page_prot);
	}
done:
	return ret;
}
/*
 * Local (non-chip) user memory is not mapped right away but as it is
 * accessed by the user-level code.
 */
static int vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page;

	page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);
	vmf->page = page;

	return 0;
}
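
/*
 * Note (editorial): this works because, for the vmf-style mappings,
 * hfi1_file_mmap() stored the kernel virtual address of the
 * vmalloc'ed buffer in vm_pgoff (memaddr >> PAGE_SHIFT), so
 * vmf->pgoff << PAGE_SHIFT reconstructs that address here.
 */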
static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt)
{
	struct hfi1_ctxtdata *uctxt;
	unsigned pollflag;

	uctxt = ((struct hfi1_filedata *)fp->private_data)->uctxt;
	if (!uctxt)
		pollflag = POLLERR;
	else if (uctxt->poll_type == HFI1_POLL_TYPE_URGENT)
		pollflag = poll_urgent(fp, pt);
	else if (uctxt->poll_type == HFI1_POLL_TYPE_ANYRCV)
		pollflag = poll_next(fp, pt);
	else /* invalid */
		pollflag = POLLERR;

	return pollflag;
}
static int hfi1_file_close(struct inode *inode, struct file *fp)
{
	struct hfi1_filedata *fdata = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fdata->uctxt;
	struct hfi1_devdata *dd = container_of(inode->i_cdev,
					       struct hfi1_devdata,
					       user_cdev);
	unsigned long flags, *ev;

	fp->private_data = NULL;

	if (!uctxt)
		goto done;

	hfi1_cdbg(PROC, "freeing ctxt %u:%u", uctxt->ctxt, fdata->subctxt);
	mutex_lock(&hfi1_mutex);

	flush_wc();
	/* drain user sdma queue */
	hfi1_user_sdma_free_queues(fdata);

	/* release the cpu */
	hfi1_put_proc_affinity(fdata->rec_cpu_num);

	/*
	 * Clear any left over, unhandled events so the next process that
	 * gets this context doesn't get confused.
	 */
	ev = dd->events + ((uctxt->ctxt - dd->first_user_ctxt) *
			   HFI1_MAX_SHARED_CTXTS) + fdata->subctxt;
	*ev = 0;

	if (--uctxt->cnt) {
		uctxt->active_slaves &= ~(1 << fdata->subctxt);
		mutex_unlock(&hfi1_mutex);
		goto done;
	}

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	/*
	 * Disable receive context and interrupt available, reset all
	 * RcvCtxtCtrl bits to default values.
	 */
	hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
		     HFI1_RCVCTRL_TIDFLOW_DIS |
		     HFI1_RCVCTRL_INTRAVAIL_DIS |
		     HFI1_RCVCTRL_TAILUPD_DIS |
		     HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
		     HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
		     HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt->ctxt);
	/* Clear the context's J_KEY */
	hfi1_clear_ctxt_jkey(dd, uctxt->ctxt);
	/*
	 * Reset context integrity checks to default.
	 * (writes to CSRs probably belong in chip.c)
	 */
	write_kctxt_csr(dd, uctxt->sc->hw_context, SEND_CTXT_CHECK_ENABLE,
			hfi1_pkt_default_send_ctxt_mask(dd, uctxt->sc->type));
	sc_disable(uctxt->sc);
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	dd->rcd[uctxt->ctxt] = NULL;

	hfi1_user_exp_rcv_free(fdata);
	hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);

	uctxt->rcvwait_to = 0;
	uctxt->piowait_to = 0;
	uctxt->rcvnowait = 0;
	uctxt->pionowait = 0;
	uctxt->event_flags = 0;

	hfi1_stats.sps_ctxts--;
	if (++dd->freectxts == dd->num_user_contexts)
		aspm_enable_all(dd);
	mutex_unlock(&hfi1_mutex);
	hfi1_free_ctxtdata(dd, uctxt);

done:
	mmdrop(fdata->mm);
	kobject_put(&dd->kobj);
	kfree(fdata);
	return 0;
}
/*
 * Convert kernel *virtual* addresses to physical addresses.
 * This is used for vmalloc'ed addresses.
 */
static u64 kvirt_to_phys(void *addr)
{
	struct page *page;
	u64 paddr = 0;

	page = vmalloc_to_page(addr);
	if (page)
		paddr = page_to_pfn(page) << PAGE_SHIFT;

	return paddr;
}
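
/*
 * Note (editorial): only the page-aligned part of the physical
 * address is returned; any offset within the page is dropped. The
 * lone caller in this file maps dd->status one full page at a time,
 * so that is sufficient here.
 */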
static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo)
{
	int i_minor, ret = 0;
	unsigned int swmajor, swminor;

	swmajor = uinfo->userversion >> 16;
	if (swmajor != HFI1_USER_SWMAJOR) {
		ret = -ENODEV;
		goto done;
	}

	swminor = uinfo->userversion & 0xffff;

	mutex_lock(&hfi1_mutex);
	/* First, let's check if we need to set up a shared context. */
	if (uinfo->subctxt_cnt) {
		struct hfi1_filedata *fd = fp->private_data;

		ret = find_shared_ctxt(fp, uinfo);
		if (ret < 0)
			goto done_unlock;
		if (ret) {
			fd->rec_cpu_num =
				hfi1_get_proc_affinity(fd->uctxt->numa_id);
		}
	}

	/*
	 * We execute the following block if we couldn't find a
	 * shared context or if context sharing is not required.
	 */
	if (!ret) {
		i_minor = iminor(file_inode(fp)) - HFI1_USER_MINOR_BASE;
		ret = get_user_context(fp, uinfo, i_minor);
	}
done_unlock:
	mutex_unlock(&hfi1_mutex);
done:
	return ret;
}
static int get_user_context(struct file *fp, struct hfi1_user_info *uinfo,
			    int devno)
{
	struct hfi1_devdata *dd = NULL;
	int devmax, npresent, nup;

	devmax = hfi1_count_units(&npresent, &nup);
	if (!npresent)
		return -ENXIO;

	if (!nup)
		return -ENETDOWN;

	dd = hfi1_lookup(devno);
	if (!dd)
		return -ENODEV;
	else if (!dd->freectxts)
		return -EBUSY;

	return allocate_ctxt(fp, dd, uinfo);
}
static int find_shared_ctxt(struct file *fp,
			    const struct hfi1_user_info *uinfo)
{
	int devmax, ndev, i;
	int ret = 0;
	struct hfi1_filedata *fd = fp->private_data;

	devmax = hfi1_count_units(NULL, NULL);

	for (ndev = 0; ndev < devmax; ndev++) {
		struct hfi1_devdata *dd = hfi1_lookup(ndev);

		if (!(dd && (dd->flags & HFI1_PRESENT) && dd->kregbase))
			continue;
		for (i = dd->first_user_ctxt; i < dd->num_rcv_contexts; i++) {
			struct hfi1_ctxtdata *uctxt = dd->rcd[i];

			/* Skip ctxts which are not yet open */
			if (!uctxt || !uctxt->cnt)
				continue;
			/* Skip ctxt if it doesn't match the requested one */
			if (memcmp(uctxt->uuid, uinfo->uuid,
				   sizeof(uctxt->uuid)) ||
			    uctxt->jkey != generate_jkey(current_uid()) ||
			    uctxt->subctxt_id != uinfo->subctxt_id ||
			    uctxt->subctxt_cnt != uinfo->subctxt_cnt)
				continue;

			/* Verify the sharing process matches the master */
			if (uctxt->userversion != uinfo->userversion ||
			    uctxt->cnt >= uctxt->subctxt_cnt) {
				ret = -EINVAL;
				goto done;
			}
			fd->uctxt = uctxt;
			fd->subctxt = uctxt->cnt++;
			uctxt->active_slaves |= 1 << fd->subctxt;
			ret = 1;
			goto done;
		}
	}

done:
	return ret;
}
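
/*
 * In this sharing model the first process to open a context becomes
 * the "master" (sub-context 0); later processes that present the same
 * uuid, job key, and sub-context parameters attach as slaves and take
 * the next free slot, which is what the cnt++/active_slaves
 * bookkeeping above implements.
 */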
static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd,
			 struct hfi1_user_info *uinfo)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt;
	unsigned ctxt;
	int ret, numa;

	if (dd->flags & HFI1_FROZEN) {
		/*
		 * Pick an error that is unique from all other errors
		 * that are returned so the user process knows that
		 * it tried to allocate while the SPC was frozen. It
		 * should be able to retry with success in a short
		 * while.
		 */
		return -EIO;
	}

	for (ctxt = dd->first_user_ctxt; ctxt < dd->num_rcv_contexts; ctxt++)
		if (!dd->rcd[ctxt])
			break;

	if (ctxt == dd->num_rcv_contexts)
		return -EBUSY;

	/*
	 * If we don't have a NUMA node requested, preference is towards
	 * device NUMA node.
	 */
	fd->rec_cpu_num = hfi1_get_proc_affinity(dd->node);
	if (fd->rec_cpu_num != -1)
		numa = cpu_to_node(fd->rec_cpu_num);
	else
		numa = numa_node_id();
	uctxt = hfi1_create_ctxtdata(dd->pport, ctxt, numa);
	if (!uctxt) {
		dd_dev_err(dd,
			   "Unable to allocate ctxtdata memory, failing open\n");
		return -ENOMEM;
	}
	hfi1_cdbg(PROC, "[%u:%u] pid %u assigned to CPU %d (NUMA %u)",
		  uctxt->ctxt, fd->subctxt, current->pid, fd->rec_cpu_num,
		  uctxt->numa_id);

	/*
	 * Allocate and enable a PIO send context.
	 */
	uctxt->sc = sc_alloc(dd, SC_USER, uctxt->rcvhdrqentsize,
			     uctxt->dd->node);
	if (!uctxt->sc)
		return -ENOMEM;

	hfi1_cdbg(PROC, "allocated send context %u(%u)\n", uctxt->sc->sw_index,
		  uctxt->sc->hw_context);
	ret = sc_enable(uctxt->sc);
	if (ret)
		return ret;
	/*
	 * Setup shared context resources if the user-level has requested
	 * shared contexts and this is the 'master' process.
	 * This has to be done here so the rest of the sub-contexts find the
	 * proper values.
	 */
	if (uinfo->subctxt_cnt && !fd->subctxt) {
		ret = init_subctxts(uctxt, uinfo);
		/*
		 * On error, we don't need to disable and de-allocate the
		 * send context because it will be done during file close
		 */
		if (ret)
			return ret;
	}
	uctxt->userversion = uinfo->userversion;
	uctxt->flags = hfi1_cap_mask; /* save current flag state */
	init_waitqueue_head(&uctxt->wait);
	strlcpy(uctxt->comm, current->comm, sizeof(uctxt->comm));
	memcpy(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid));
	uctxt->jkey = generate_jkey(current_uid());
	INIT_LIST_HEAD(&uctxt->sdma_queues);
	spin_lock_init(&uctxt->sdma_qlock);
	hfi1_stats.sps_ctxts++;
	/*
	 * Disable ASPM when there are open user/PSM contexts to avoid
	 * issues with ASPM L1 exit latency
	 */
	if (dd->freectxts-- == dd->num_user_contexts)
		aspm_disable_all(dd);
	fd->uctxt = uctxt;

	return 0;
}
static int init_subctxts(struct hfi1_ctxtdata *uctxt,
			 const struct hfi1_user_info *uinfo)
{
	unsigned num_subctxts;

	num_subctxts = uinfo->subctxt_cnt;
	if (num_subctxts > HFI1_MAX_SHARED_CTXTS)
		return -EINVAL;

	uctxt->subctxt_cnt = uinfo->subctxt_cnt;
	uctxt->subctxt_id = uinfo->subctxt_id;
	uctxt->active_slaves = 1;
	uctxt->redirect_seq_cnt = 1;
	set_bit(HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags);

	return 0;
}
static int setup_subctxt(struct hfi1_ctxtdata *uctxt)
{
	int ret = 0;
	unsigned num_subctxts = uctxt->subctxt_cnt;

	uctxt->subctxt_uregbase = vmalloc_user(PAGE_SIZE);
	if (!uctxt->subctxt_uregbase) {
		ret = -ENOMEM;
		goto bail;
	}
	/* We can take the size of the RcvHdr Queue from the master */
	uctxt->subctxt_rcvhdr_base = vmalloc_user(uctxt->rcvhdrq_size *
						  num_subctxts);
	if (!uctxt->subctxt_rcvhdr_base) {
		ret = -ENOMEM;
		goto bail_ureg;
	}

	uctxt->subctxt_rcvegrbuf = vmalloc_user(uctxt->egrbufs.size *
						num_subctxts);
	if (!uctxt->subctxt_rcvegrbuf) {
		ret = -ENOMEM;
		goto bail_rhdr;
	}
	goto bail;

bail_rhdr:
	vfree(uctxt->subctxt_rcvhdr_base);
bail_ureg:
	vfree(uctxt->subctxt_uregbase);
	uctxt->subctxt_uregbase = NULL;
bail:
	return ret;
}
static int user_init(struct file *fp)
{
	unsigned int rcvctrl_ops = 0;
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;

	/* make sure that the context has already been setup */
	if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags))
		return -EFAULT;

	/* initialize poll variables... */
	uctxt->urgent = 0;
	uctxt->urgent_poll = 0;

	/*
	 * Now enable the ctxt for receive.
	 * For chips that are set to DMA the tail register to memory
	 * when they change (and when the update bit transitions from
	 * 0 to 1), we turn it off and then back on.
	 * This will (very briefly) affect any other open ctxts, but the
	 * duration is very short, and therefore isn't an issue. We
	 * explicitly set the in-memory tail copy to 0 beforehand, so we
	 * don't have to wait to be sure the DMA update has happened
	 * (chip resets head/tail to 0 on transition to enable).
	 */
	if (uctxt->rcvhdrtail_kvaddr)
		clear_rcvhdrtail(uctxt);

	/* Setup J_KEY before enabling the context */
	hfi1_set_ctxt_jkey(uctxt->dd, uctxt->ctxt, uctxt->jkey);

	rcvctrl_ops = HFI1_RCVCTRL_CTXT_ENB;
	if (HFI1_CAP_UGET_MASK(uctxt->flags, HDRSUPP))
		rcvctrl_ops |= HFI1_RCVCTRL_TIDFLOW_ENB;
	/*
	 * Ignore the bit in the flags for now until proper
	 * support for multiple packet per rcv array entry is
	 * added.
	 */
	if (!HFI1_CAP_UGET_MASK(uctxt->flags, MULTI_PKT_EGR))
		rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
	if (HFI1_CAP_UGET_MASK(uctxt->flags, NODROP_EGR_FULL))
		rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
	if (HFI1_CAP_UGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
		rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
	/*
	 * The RcvCtxtCtrl.TailUpd bit has to be explicitly written.
	 * We can't rely on the correct value to be set from prior
	 * uses of the chip or ctxt. Therefore, add the rcvctrl op
	 * for both enable and disable.
	 */
	if (HFI1_CAP_UGET_MASK(uctxt->flags, DMA_RTAIL))
		rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
	else
		rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_DIS;
	hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt->ctxt);

	/* Notify any waiting slaves */
	if (uctxt->subctxt_cnt) {
		clear_bit(HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags);
		wake_up(&uctxt->wait);
	}

	return 0;
}
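
/*
 * The wake_up() above releases any slave processes blocked in
 * setup_ctxt()'s wait_event_interruptible() on HFI1_CTXT_MASTER_UNINIT:
 * the master finishes user_init() first, then the slaves proceed.
 */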
static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len)
{
	struct hfi1_ctxt_info cinfo;
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	int ret = 0;

	memset(&cinfo, 0, sizeof(cinfo));
	cinfo.runtime_flags = (((uctxt->flags >> HFI1_CAP_MISC_SHIFT) &
				HFI1_CAP_MISC_MASK) << HFI1_CAP_USER_SHIFT) |
			HFI1_CAP_UGET_MASK(uctxt->flags, MASK) |
			HFI1_CAP_KGET_MASK(uctxt->flags, K2U);
	/* adjust flag if this fd is not able to cache */
	if (!fd->handler)
		cinfo.runtime_flags |= HFI1_CAP_TID_UNMAP; /* no caching */

	cinfo.num_active = hfi1_count_active_units();
	cinfo.unit = uctxt->dd->unit;
	cinfo.ctxt = uctxt->ctxt;
	cinfo.subctxt = fd->subctxt;
	cinfo.rcvtids = roundup(uctxt->egrbufs.alloced,
				uctxt->dd->rcv_entries.group_size) +
		uctxt->expected_count;
	cinfo.credits = uctxt->sc->credits;
	cinfo.numa_node = uctxt->numa_id;
	cinfo.rec_cpu = fd->rec_cpu_num;
	cinfo.send_ctxt = uctxt->sc->hw_context;

	cinfo.egrtids = uctxt->egrbufs.alloced;
	cinfo.rcvhdrq_cnt = uctxt->rcvhdrq_cnt;
	cinfo.rcvhdrq_entsize = uctxt->rcvhdrqentsize << 2; /* DWORDs to bytes */
	cinfo.sdma_ring_size = fd->cq->nentries;
	cinfo.rcvegr_size = uctxt->egrbufs.rcvtid_size;

	trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, fd->subctxt, cinfo);
	if (copy_to_user(ubase, &cinfo, sizeof(cinfo)))
		ret = -EFAULT;

	return ret;
}
static int setup_ctxt(struct file *fp)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	int ret = 0;

	/*
	 * Context should be set up only once, including allocation and
	 * programming of eager buffers. This is done if context sharing
	 * is not requested or by the master process.
	 */
	if (!uctxt->subctxt_cnt || !fd->subctxt) {
		ret = hfi1_init_ctxt(uctxt->sc);
		if (ret)
			goto done;

		/* Now allocate the RcvHdr queue and eager buffers. */
		ret = hfi1_create_rcvhdrq(dd, uctxt);
		if (ret)
			goto done;
		ret = hfi1_setup_eagerbufs(uctxt);
		if (ret)
			goto done;
		if (uctxt->subctxt_cnt && !fd->subctxt) {
			ret = setup_subctxt(uctxt);
			if (ret)
				goto done;
		}
	} else {
		ret = wait_event_interruptible(uctxt->wait, !test_bit(
					       HFI1_CTXT_MASTER_UNINIT,
					       &uctxt->event_flags));
		if (ret)
			goto done;
	}

	ret = hfi1_user_sdma_alloc_queues(uctxt, fp);
	if (ret)
		goto done;
	/*
	 * Expected receive has to be setup for all processes (including
	 * shared contexts). However, it has to be done after the master
	 * context has been fully configured as it depends on the
	 * eager/expected split of the RcvArray entries.
	 * Setting it up here ensures that the subcontexts will be waiting
	 * (due to the above wait_event_interruptible()) until the master
	 * is set up.
	 */
	ret = hfi1_user_exp_rcv_init(fp);
	if (ret)
		goto done;

	set_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags);
done:
	return ret;
}
static int get_base_info(struct file *fp, void __user *ubase, __u32 len)
{
	struct hfi1_base_info binfo;
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	ssize_t sz;
	unsigned offset;
	int ret = 0;

	trace_hfi1_uctxtdata(uctxt->dd, uctxt);

	memset(&binfo, 0, sizeof(binfo));
	binfo.hw_version = dd->revision;
	binfo.sw_version = HFI1_KERN_SWVERSION;
	binfo.bthqp = kdeth_qp;
	binfo.jkey = uctxt->jkey;
	/*
	 * If more than 64 contexts are enabled the allocated credit
	 * return will span two or three contiguous pages. Since we only
	 * map the page containing the context's credit return address,
	 * we need to calculate the offset in the proper page.
	 */
	offset = ((u64)uctxt->sc->hw_free -
		  (u64)dd->cr_base[uctxt->numa_id].va) % PAGE_SIZE;
	binfo.sc_credits_addr = HFI1_MMAP_TOKEN(PIO_CRED, uctxt->ctxt,
						fd->subctxt, offset);
	binfo.pio_bufbase = HFI1_MMAP_TOKEN(PIO_BUFS, uctxt->ctxt,
					    fd->subctxt,
					    uctxt->sc->base_addr);
	binfo.pio_bufbase_sop = HFI1_MMAP_TOKEN(PIO_BUFS_SOP,
						uctxt->ctxt,
						fd->subctxt,
						uctxt->sc->base_addr);
	binfo.rcvhdr_bufbase = HFI1_MMAP_TOKEN(RCV_HDRQ, uctxt->ctxt,
					       fd->subctxt,
					       uctxt->rcvhdrq);
	binfo.rcvegr_bufbase = HFI1_MMAP_TOKEN(RCV_EGRBUF, uctxt->ctxt,
					       fd->subctxt,
					       uctxt->egrbufs.rcvtids[0].phys);
	binfo.sdma_comp_bufbase = HFI1_MMAP_TOKEN(SDMA_COMP, uctxt->ctxt,
						  fd->subctxt, 0);
	/*
	 * user regs are at
	 * (RXE_PER_CONTEXT_USER + (ctxt * RXE_PER_CONTEXT_SIZE))
	 */
	binfo.user_regbase = HFI1_MMAP_TOKEN(UREGS, uctxt->ctxt,
					     fd->subctxt, 0);
	offset = offset_in_page((((uctxt->ctxt - dd->first_user_ctxt) *
				  HFI1_MAX_SHARED_CTXTS) + fd->subctxt) *
				sizeof(*dd->events));
	binfo.events_bufbase = HFI1_MMAP_TOKEN(EVENTS, uctxt->ctxt,
					       fd->subctxt,
					       offset);
	binfo.status_bufbase = HFI1_MMAP_TOKEN(STATUS, uctxt->ctxt,
					       fd->subctxt,
					       dd->status);
	if (HFI1_CAP_IS_USET(DMA_RTAIL))
		binfo.rcvhdrtail_base = HFI1_MMAP_TOKEN(RTAIL, uctxt->ctxt,
							fd->subctxt, 0);
	if (uctxt->subctxt_cnt) {
		binfo.subctxt_uregbase = HFI1_MMAP_TOKEN(SUBCTXT_UREGS,
							 uctxt->ctxt,
							 fd->subctxt, 0);
		binfo.subctxt_rcvhdrbuf = HFI1_MMAP_TOKEN(SUBCTXT_RCV_HDRQ,
							  uctxt->ctxt,
							  fd->subctxt, 0);
		binfo.subctxt_rcvegrbuf = HFI1_MMAP_TOKEN(SUBCTXT_EGRBUF,
							  uctxt->ctxt,
							  fd->subctxt, 0);
	}
	sz = (len < sizeof(binfo)) ? len : sizeof(binfo);
	if (copy_to_user(ubase, &binfo, sz))
		ret = -EFAULT;

	return ret;
}
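
/*
 * Each *_bufbase value filled in above is an HFI1_MMAP_TOKEN, not an
 * address: user space hands it back unchanged as the mmap() offset
 * and hfi1_file_mmap() decodes it to select the region (see the
 * token layout sketch near the top of this file).
 */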
static unsigned int poll_urgent(struct file *fp,
				struct poll_table_struct *pt)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned pollflag;

	poll_wait(fp, &uctxt->wait, pt);

	spin_lock_irq(&dd->uctxt_lock);
	if (uctxt->urgent != uctxt->urgent_poll) {
		pollflag = POLLIN | POLLRDNORM;
		uctxt->urgent_poll = uctxt->urgent;
	} else {
		pollflag = 0;
		set_bit(HFI1_CTXT_WAITING_URG, &uctxt->event_flags);
	}
	spin_unlock_irq(&dd->uctxt_lock);

	return pollflag;
}
static unsigned int poll_next(struct file *fp,
			      struct poll_table_struct *pt)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned pollflag;

	poll_wait(fp, &uctxt->wait, pt);

	spin_lock_irq(&dd->uctxt_lock);
	if (hdrqempty(uctxt)) {
		set_bit(HFI1_CTXT_WAITING_RCV, &uctxt->event_flags);
		hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_ENB, uctxt->ctxt);
		pollflag = 0;
	} else {
		pollflag = POLLIN | POLLRDNORM;
	}
	spin_unlock_irq(&dd->uctxt_lock);

	return pollflag;
}
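
/*
 * Note the rearm pattern above: when the receive header queue is
 * empty, poll_next() both records that a process is waiting and
 * re-enables the "interrupt available" source for the context, so the
 * next arriving packet wakes the waiter; otherwise it reports the
 * context readable immediately.
 */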
/*
 * Find all user contexts in use, and set the specified bit in their
 * event mask.
 * See also find_ctxt() for a similar use, that is specific to send buffers.
 */
int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit)
{
	struct hfi1_ctxtdata *uctxt;
	struct hfi1_devdata *dd = ppd->dd;
	unsigned ctxt;
	int ret = 0;
	unsigned long flags;

	if (!dd->events) {
		ret = -EINVAL;
		goto done;
	}

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	for (ctxt = dd->first_user_ctxt; ctxt < dd->num_rcv_contexts;
	     ctxt++) {
		uctxt = dd->rcd[ctxt];
		if (uctxt) {
			unsigned long *evs = dd->events +
				(uctxt->ctxt - dd->first_user_ctxt) *
				HFI1_MAX_SHARED_CTXTS;
			int i;
			/*
			 * subctxt_cnt is 0 if not shared, so do base
			 * separately, first, then remaining subctxt, if any
			 */
			set_bit(evtbit, evs);
			for (i = 1; i < uctxt->subctxt_cnt; i++)
				set_bit(evtbit, evs + i);
		}
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);
done:
	return ret;
}
/**
 * manage_rcvq - manage a context's receive queue
 * @uctxt: the context
 * @subctxt: the sub-context
 * @start_stop: action to carry out
 *
 * start_stop == 0 disables receive on the context, for use in queue
 * overflow conditions. start_stop == 1 re-enables, to be used to
 * re-init the software copy of the head register.
 */
static int manage_rcvq(struct hfi1_ctxtdata *uctxt, unsigned subctxt,
		       int start_stop)
{
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned int rcvctrl_op;

	if (subctxt)
		goto bail;
	/* atomically clear receive enable ctxt. */
	if (start_stop) {
		/*
		 * On enable, force in-memory copy of the tail register to
		 * 0, so that protocol code doesn't have to worry about
		 * whether or not the chip has yet updated the in-memory
		 * copy or not on return from the system call. The chip
		 * always resets its tail register back to 0 on a
		 * transition from disabled to enabled.
		 */
		if (uctxt->rcvhdrtail_kvaddr)
			clear_rcvhdrtail(uctxt);
		rcvctrl_op = HFI1_RCVCTRL_CTXT_ENB;
	} else {
		rcvctrl_op = HFI1_RCVCTRL_CTXT_DIS;
	}
	hfi1_rcvctrl(dd, rcvctrl_op, uctxt->ctxt);
	/* always; new head should be equal to new tail; see above */
bail:
	return 0;
}
/*
 * clear the event notifier events for this context.
 * User process then performs actions appropriate to bit having been
 * set, if desired, and checks again in future.
 */
static int user_event_ack(struct hfi1_ctxtdata *uctxt, int subctxt,
			  unsigned long events)
{
	int i;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned long *evs;

	if (!dd->events)
		return 0;

	evs = dd->events + ((uctxt->ctxt - dd->first_user_ctxt) *
			    HFI1_MAX_SHARED_CTXTS) + subctxt;

	for (i = 0; i <= _HFI1_MAX_EVENT_BIT; i++) {
		if (!test_bit(i, &events))
			continue;
		clear_bit(i, evs);
	}
	return 0;
}
static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned subctxt,
			 u16 pkey)
{
	int ret = -ENOENT, i, intable = 0;
	struct hfi1_pportdata *ppd = uctxt->ppd;
	struct hfi1_devdata *dd = uctxt->dd;

	if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY) {
		ret = -EINVAL;
		goto done;
	}

	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++)
		if (pkey == ppd->pkeys[i]) {
			intable = 1;
			break;
		}

	if (intable)
		ret = hfi1_set_ctxt_pkey(dd, uctxt->ctxt, pkey);
done:
	return ret;
}
static void user_remove(struct hfi1_devdata *dd)
{
	hfi1_cdev_cleanup(&dd->user_cdev, &dd->user_device);
}
static int user_add(struct hfi1_devdata *dd)
{
	char name[10];
	int ret;

	snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit);
	ret = hfi1_cdev_init(dd->unit, name, &hfi1_file_ops,
			     &dd->user_cdev, &dd->user_device,
			     true, &dd->kobj);

	return ret;
}
/*
 * Create per-unit files in /dev
 */
int hfi1_device_create(struct hfi1_devdata *dd)
{
	return user_add(dd);
}

/*
 * Remove per-unit files in /dev
 * void, core kernel returns no errors for this stuff
 */
void hfi1_device_remove(struct hfi1_devdata *dd)
{
	user_remove(dd);
}