/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include "hfi.h"
#include "pio.h"
#include "device.h"
#include "common.h"
#include "trace.h"
#include "user_sdma.h"
#include "user_exp_rcv.h"
#include "eprom.h"
#include "aspm.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

#define SEND_CTXT_HALT_TIMEOUT 1000 /* msecs */
/*
 * File operation functions
 */
static int hfi1_file_open(struct inode *, struct file *);
static int hfi1_file_close(struct inode *, struct file *);
static ssize_t hfi1_file_write(struct file *, const char __user *,
			       size_t, loff_t *);
static ssize_t hfi1_write_iter(struct kiocb *, struct iov_iter *);
static unsigned int hfi1_poll(struct file *, struct poll_table_struct *);
static int hfi1_file_mmap(struct file *, struct vm_area_struct *);

static u64 kvirt_to_phys(void *);
static int assign_ctxt(struct file *, struct hfi1_user_info *);
static int init_subctxts(struct hfi1_ctxtdata *, const struct hfi1_user_info *);
static int user_init(struct file *);
static int get_ctxt_info(struct file *, void __user *, __u32);
static int get_base_info(struct file *, void __user *, __u32);
static int setup_ctxt(struct file *);
static int setup_subctxt(struct hfi1_ctxtdata *);
static int get_user_context(struct file *, struct hfi1_user_info *,
			    int, unsigned);
static int find_shared_ctxt(struct file *, const struct hfi1_user_info *);
static int allocate_ctxt(struct file *, struct hfi1_devdata *,
			 struct hfi1_user_info *);
static unsigned int poll_urgent(struct file *, struct poll_table_struct *);
static unsigned int poll_next(struct file *, struct poll_table_struct *);
static int user_event_ack(struct hfi1_ctxtdata *, int, unsigned long);
static int set_ctxt_pkey(struct hfi1_ctxtdata *, unsigned, u16);
static int manage_rcvq(struct hfi1_ctxtdata *, unsigned, int);
static int vma_fault(struct vm_area_struct *, struct vm_fault *);
static const struct file_operations hfi1_file_ops = {
	.owner = THIS_MODULE,
	.write = hfi1_file_write,
	.write_iter = hfi1_write_iter,
	.open = hfi1_file_open,
	.release = hfi1_file_close,
	.poll = hfi1_poll,
	.mmap = hfi1_file_mmap,
	.llseek = noop_llseek,
};
static struct vm_operations_struct vm_ops = {
	.fault = vma_fault,
};
/*
 * Types of memories mapped into user processes' space
 */
enum mmap_types {
	PIO_BUFS = 1,
	PIO_BUFS_SOP,
	PIO_CRED,
	RCV_HDRQ,
	RCV_EGRBUF,
	UREGS,
	EVENTS,
	STATUS,
	RTAIL,
	SUBCTXT_UREGS,
	SUBCTXT_RCV_HDRQ,
	SUBCTXT_EGRBUF,
	SDMA_COMP
};
/*
 * Masks and offsets defining the mmap tokens
 */
#define HFI1_MMAP_OFFSET_MASK   0xfffULL
#define HFI1_MMAP_OFFSET_SHIFT  0
#define HFI1_MMAP_SUBCTXT_MASK  0xfULL
#define HFI1_MMAP_SUBCTXT_SHIFT 12
#define HFI1_MMAP_CTXT_MASK     0xffULL
#define HFI1_MMAP_CTXT_SHIFT    16
#define HFI1_MMAP_TYPE_MASK     0xfULL
#define HFI1_MMAP_TYPE_SHIFT    24
#define HFI1_MMAP_MAGIC_MASK    0xffffffffULL
#define HFI1_MMAP_MAGIC_SHIFT   32

#define HFI1_MMAP_MAGIC         0xdabbad00
#define HFI1_MMAP_TOKEN_SET(field, val)	\
	(((val) & HFI1_MMAP_##field##_MASK) << HFI1_MMAP_##field##_SHIFT)
#define HFI1_MMAP_TOKEN_GET(field, token) \
	(((token) >> HFI1_MMAP_##field##_SHIFT) & HFI1_MMAP_##field##_MASK)
#define HFI1_MMAP_TOKEN(type, ctxt, subctxt, addr)   \
	(HFI1_MMAP_TOKEN_SET(MAGIC, HFI1_MMAP_MAGIC) | \
	HFI1_MMAP_TOKEN_SET(TYPE, type) | \
	HFI1_MMAP_TOKEN_SET(CTXT, ctxt) | \
	HFI1_MMAP_TOKEN_SET(SUBCTXT, subctxt) | \
	HFI1_MMAP_TOKEN_SET(OFFSET, (offset_in_page(addr))))
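
/*
 * Reading the masks and shifts above, a token decomposes as:
 *
 *	[63:32] magic (0xdabbad00)
 *	[27:24] type  (enum mmap_types)
 *	[23:16] context
 *	[15:12] sub-context
 *	[11:0]  offset of the target within its page
 *
 * As a worked example (values chosen for illustration only): a PIO_CRED
 * token (type 3) for context 5, sub-context 0, in-page offset 0x40
 * encodes to 0xdabbad0003050040.
 */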
#define dbg(fmt, ...) \
	pr_info(fmt, ##__VA_ARGS__)
static inline int is_valid_mmap(u64 token)
{
	return (HFI1_MMAP_TOKEN_GET(MAGIC, token) == HFI1_MMAP_MAGIC);
}
static int hfi1_file_open(struct inode *inode, struct file *fp)
{
	/* The real work is performed later in assign_ctxt() */
	fp->private_data = kzalloc(sizeof(struct hfi1_filedata), GFP_KERNEL);
	if (fp->private_data) /* no cpu affinity by default */
		((struct hfi1_filedata *)fp->private_data)->rec_cpu_num = -1;
	return fp->private_data ? 0 : -ENOMEM;
}
static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
			       size_t count, loff_t *offset)
{
	const struct hfi1_cmd __user *ucmd;
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_cmd cmd;
	struct hfi1_user_info uinfo;
	struct hfi1_tid_info tinfo;
	unsigned long addr;
	ssize_t consumed = 0, copy = 0, ret = 0;
	void *dest = NULL;
	__u64 user_val = 0;
	int uctxt_required = 1;
	int must_be_root = 0;

	if (count < sizeof(cmd)) {
		ret = -EINVAL;
		goto bail;
	}

	ucmd = (const struct hfi1_cmd __user *)data;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd))) {
		ret = -EFAULT;
		goto bail;
	}

	consumed = sizeof(cmd);

	switch (cmd.type) {
	case HFI1_CMD_ASSIGN_CTXT:
		uctxt_required = 0;	/* assigned user context not required */
		copy = sizeof(uinfo);
		dest = &uinfo;
		break;
	case HFI1_CMD_SDMA_STATUS_UPD:
	case HFI1_CMD_CREDIT_UPD:
		copy = 0;
		break;
	case HFI1_CMD_TID_UPDATE:
	case HFI1_CMD_TID_FREE:
	case HFI1_CMD_TID_INVAL_READ:
		copy = sizeof(tinfo);
		dest = &tinfo;
		break;
	case HFI1_CMD_USER_INFO:
	case HFI1_CMD_RECV_CTRL:
	case HFI1_CMD_POLL_TYPE:
	case HFI1_CMD_ACK_EVENT:
	case HFI1_CMD_CTXT_INFO:
	case HFI1_CMD_SET_PKEY:
	case HFI1_CMD_CTXT_RESET:
		copy = 0;
		user_val = cmd.addr;
		break;
	case HFI1_CMD_EP_INFO:
	case HFI1_CMD_EP_ERASE_CHIP:
	case HFI1_CMD_EP_ERASE_RANGE:
	case HFI1_CMD_EP_READ_RANGE:
	case HFI1_CMD_EP_WRITE_RANGE:
		uctxt_required = 0;	/* assigned user context not required */
		must_be_root = 1;	/* validate user */
		copy = 0;
		break;
	default:
		ret = -EINVAL;
		goto bail;
	}

	/* If the command comes with user data, copy it. */
	if (copy) {
		if (copy_from_user(dest, (void __user *)cmd.addr, copy)) {
			ret = -EFAULT;
			goto bail;
		}
		consumed += copy;
	}

	/*
	 * Make sure there is a uctxt when needed.
	 */
	if (uctxt_required && !uctxt) {
		ret = -EINVAL;
		goto bail;
	}

	/* only root can do these operations */
	if (must_be_root && !capable(CAP_SYS_ADMIN)) {
		ret = -EPERM;
		goto bail;
	}

	switch (cmd.type) {
	case HFI1_CMD_ASSIGN_CTXT:
		ret = assign_ctxt(fp, &uinfo);
		if (ret < 0)
			goto bail;
		ret = setup_ctxt(fp);
		if (ret)
			goto bail;
		ret = user_init(fp);
		break;
	case HFI1_CMD_CTXT_INFO:
		ret = get_ctxt_info(fp, (void __user *)(unsigned long)
				    user_val, cmd.len);
		break;
	case HFI1_CMD_USER_INFO:
		ret = get_base_info(fp, (void __user *)(unsigned long)
				    user_val, cmd.len);
		break;
	case HFI1_CMD_SDMA_STATUS_UPD:
		break;
	case HFI1_CMD_CREDIT_UPD:
		if (uctxt && uctxt->sc)
			sc_return_credits(uctxt->sc);
		break;
	case HFI1_CMD_TID_UPDATE:
		ret = hfi1_user_exp_rcv_setup(fp, &tinfo);
		if (!ret) {
			/*
			 * Copy the number of tidlist entries we used
			 * and the length of the buffer we registered.
			 * These fields are adjacent in the structure so
			 * we can copy them at the same time.
			 */
			addr = (unsigned long)cmd.addr +
				offsetof(struct hfi1_tid_info, tidcnt);
			if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
					 sizeof(tinfo.tidcnt) +
					 sizeof(tinfo.length)))
				ret = -EFAULT;
		}
		break;
	case HFI1_CMD_TID_INVAL_READ:
		ret = hfi1_user_exp_rcv_invalid(fp, &tinfo);
		if (ret)
			break;
		addr = (unsigned long)cmd.addr +
			offsetof(struct hfi1_tid_info, tidcnt);
		if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
				 sizeof(tinfo.tidcnt)))
			ret = -EFAULT;
		break;
	case HFI1_CMD_TID_FREE:
		ret = hfi1_user_exp_rcv_clear(fp, &tinfo);
		if (ret)
			break;
		addr = (unsigned long)cmd.addr +
			offsetof(struct hfi1_tid_info, tidcnt);
		if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
				 sizeof(tinfo.tidcnt)))
			ret = -EFAULT;
		break;
	case HFI1_CMD_RECV_CTRL:
		ret = manage_rcvq(uctxt, fd->subctxt, (int)user_val);
		break;
	case HFI1_CMD_POLL_TYPE:
		uctxt->poll_type = (typeof(uctxt->poll_type))user_val;
		break;
	case HFI1_CMD_ACK_EVENT:
		ret = user_event_ack(uctxt, fd->subctxt, user_val);
		break;
	case HFI1_CMD_SET_PKEY:
		if (HFI1_CAP_IS_USET(PKEY_CHECK))
			ret = set_ctxt_pkey(uctxt, fd->subctxt, user_val);
		else
			ret = -EPERM;
		break;
	case HFI1_CMD_CTXT_RESET: {
		struct send_context *sc;
		struct hfi1_devdata *dd;

		if (!uctxt || !uctxt->dd || !uctxt->sc) {
			ret = -EINVAL;
			break;
		}
		/*
		 * There is no protection here. User level has to
		 * guarantee that no one will be writing to the send
		 * context while it is being re-initialized.
		 * If user level breaks that guarantee, it will break
		 * its own context and no one else's.
		 */
		dd = uctxt->dd;
		sc = uctxt->sc;
		/*
		 * Wait until the interrupt handler has marked the
		 * context as halted or frozen. Report error if we time
		 * out.
		 */
		wait_event_interruptible_timeout(
			sc->halt_wait, (sc->flags & SCF_HALTED),
			msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
		if (!(sc->flags & SCF_HALTED)) {
			ret = -ENOLCK;
			break;
		}
		/*
		 * If the send context was halted due to a Freeze,
		 * wait until the device has been "unfrozen" before
		 * resetting the context.
		 */
		if (sc->flags & SCF_FROZEN) {
			wait_event_interruptible_timeout(
				dd->event_queue,
				!(ACCESS_ONCE(dd->flags) & HFI1_FROZEN),
				msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
			if (dd->flags & HFI1_FROZEN) {
				ret = -ENOLCK;
				break;
			}
			if (dd->flags & HFI1_FORCED_FREEZE) {
				/*
				 * Don't allow context reset if we are into
				 * forced freeze
				 */
				ret = -ENODEV;
				break;
			}
			sc_disable(sc);
			ret = sc_enable(sc);
			hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB,
				     uctxt->ctxt);
		} else {
			ret = sc_restart(sc);
		}
		if (!ret)
			sc_return_credits(sc);
		break;
	}
	case HFI1_CMD_EP_INFO:
	case HFI1_CMD_EP_ERASE_CHIP:
	case HFI1_CMD_EP_ERASE_RANGE:
	case HFI1_CMD_EP_READ_RANGE:
	case HFI1_CMD_EP_WRITE_RANGE:
		ret = handle_eprom_command(fp, &cmd);
		break;
	}

	if (ret >= 0)
		ret = consumed;
bail:
	return ret;
}
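
/*
 * Illustrative userspace sketch, not part of the driver: the command
 * channel above is driven by plain write(2) calls on an open device
 * file descriptor ("fd" below is an assumption for the example), e.g.
 * to request a context assignment:
 *
 *	struct hfi1_user_info uinfo = { .userversion = ... };
 *	struct hfi1_cmd cmd = {
 *		.type = HFI1_CMD_ASSIGN_CTXT,
 *		.len  = sizeof(uinfo),
 *		.addr = (__u64)(uintptr_t)&uinfo,
 *	};
 *
 *	if (write(fd, &cmd, sizeof(cmd)) < 0)
 *		the command failed;
 */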
static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
{
	struct hfi1_filedata *fd = kiocb->ki_filp->private_data;
	struct hfi1_user_sdma_pkt_q *pq = fd->pq;
	struct hfi1_user_sdma_comp_q *cq = fd->cq;
	int ret = 0, done = 0, reqs = 0;
	unsigned long dim = from->nr_segs;

	if (!cq || !pq) {
		ret = -EIO;
		goto done;
	}

	if (!iter_is_iovec(from) || !dim) {
		ret = -EINVAL;
		goto done;
	}

	hfi1_cdbg(SDMA, "SDMA request from %u:%u (%lu)",
		  fd->uctxt->ctxt, fd->subctxt, dim);

	if (atomic_read(&pq->n_reqs) == pq->n_max_reqs) {
		ret = -ENOSPC;
		goto done;
	}

	while (dim) {
		unsigned long count = 0;

		ret = hfi1_user_sdma_process_request(
			kiocb->ki_filp, (struct iovec *)(from->iov + done),
			dim, &count);
		if (ret)
			goto done;
		dim -= count;
		done += count;
		reqs++;
	}
done:
	return ret ? ret : reqs;
}
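
/*
 * Illustrative userspace sketch, not part of the driver: SDMA requests
 * are submitted through writev(2); each queued request consumes one or
 * more of the supplied iovec segments, and on success the return value
 * is the number of requests queued rather than a byte count:
 *
 *	struct iovec iov = { .iov_base = req, .iov_len = req_len };
 *	ssize_t reqs = writev(fd, &iov, 1);
 */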
static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd;
	unsigned long flags, pfn;
	u64 token = vma->vm_pgoff << PAGE_SHIFT,
		memaddr = 0;
	u8 subctxt, mapio = 0, vmf = 0, type;
	ssize_t memlen = 0;
	int ret = 0;
	u16 ctxt;

	if (!is_valid_mmap(token) || !uctxt ||
	    !(vma->vm_flags & VM_SHARED)) {
		ret = -EINVAL;
		goto done;
	}
	dd = uctxt->dd;
	ctxt = HFI1_MMAP_TOKEN_GET(CTXT, token);
	subctxt = HFI1_MMAP_TOKEN_GET(SUBCTXT, token);
	type = HFI1_MMAP_TOKEN_GET(TYPE, token);
	if (ctxt != uctxt->ctxt || subctxt != fd->subctxt) {
		ret = -EINVAL;
		goto done;
	}

	flags = vma->vm_flags;

	switch (type) {
	case PIO_BUFS:
	case PIO_BUFS_SOP:
		memaddr = ((dd->physaddr + TXE_PIO_SEND) +
				/* chip pio base */
			   (uctxt->sc->hw_context * BIT(16))) +
				/* 64K PIO space / ctxt */
			   (type == PIO_BUFS_SOP ?
				(TXE_PIO_SIZE / 2) : 0); /* sop? */
		/*
		 * Map only the amount allocated to the context, not the
		 * entire available context's PIO space.
		 */
		memlen = PAGE_ALIGN(uctxt->sc->credits * PIO_BLOCK_SIZE);
		flags &= ~VM_MAYREAD;
		flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		mapio = 1;
		break;
	case PIO_CRED:
		if (flags & VM_WRITE) {
			ret = -EPERM;
			goto done;
		}
		/*
		 * The credit return location for this context could be on the
		 * second or third page allocated for credit returns (if number
		 * of enabled contexts > 64 and 128 respectively).
		 */
		memaddr = dd->cr_base[uctxt->numa_id].pa +
			(((u64)uctxt->sc->hw_free -
			  (u64)dd->cr_base[uctxt->numa_id].va) & PAGE_MASK);
		memlen = PAGE_SIZE;
		flags &= ~VM_MAYWRITE;
		flags |= VM_DONTCOPY | VM_DONTEXPAND;
		/*
		 * The driver has already allocated memory for credit
		 * returns and programmed it into the chip. Has that
		 * memory been flagged as non-cached?
		 */
		/* vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); */
		mapio = 1;
		break;
	case RCV_HDRQ:
		memaddr = uctxt->rcvhdrq_phys;
		memlen = uctxt->rcvhdrq_size;
		break;
	case RCV_EGRBUF: {
		unsigned long addr;
		int i;

		/*
		 * The RcvEgr buffer need to be handled differently
		 * as multiple non-contiguous pages need to be mapped
		 * into the user process.
		 */
		memlen = uctxt->egrbufs.size;
		if ((vma->vm_end - vma->vm_start) != memlen) {
			dd_dev_err(dd, "Eager buffer map size invalid (%lu != %lu)\n",
				   (vma->vm_end - vma->vm_start), memlen);
			ret = -EINVAL;
			goto done;
		}
		if (vma->vm_flags & VM_WRITE) {
			ret = -EPERM;
			goto done;
		}
		vma->vm_flags &= ~VM_MAYWRITE;
		addr = vma->vm_start;
		for (i = 0 ; i < uctxt->egrbufs.numbufs; i++) {
			ret = remap_pfn_range(
				vma, addr,
				uctxt->egrbufs.buffers[i].phys >> PAGE_SHIFT,
				uctxt->egrbufs.buffers[i].len,
				vma->vm_page_prot);
			if (ret)
				goto done;
			addr += uctxt->egrbufs.buffers[i].len;
		}
		ret = 0;
		goto done;
	}
	case UREGS:
		/*
		 * Map only the page that contains this context's user
		 * registers.
		 */
		memaddr = (unsigned long)
			(dd->physaddr + RXE_PER_CONTEXT_USER)
			+ (uctxt->ctxt * RXE_PER_CONTEXT_SIZE);
		/*
		 * TidFlow table is on the same page as the rest of the
		 * user registers.
		 */
		memlen = PAGE_SIZE;
		flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		mapio = 1;
		break;
	case EVENTS:
		/*
		 * Use the page where this context's flags are. User level
		 * knows where its own bitmap is within the page.
		 */
		memaddr = (unsigned long)(dd->events +
					  ((uctxt->ctxt - dd->first_user_ctxt) *
					   HFI1_MAX_SHARED_CTXTS)) & PAGE_MASK;
		memlen = PAGE_SIZE;
		/*
		 * v3.7 removes VM_RESERVED but the effect is kept by
		 * using VM_IO | VM_DONTEXPAND.
		 */
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	case STATUS:
		memaddr = kvirt_to_phys((void *)dd->status);
		memlen = PAGE_SIZE;
		flags |= VM_IO | VM_DONTEXPAND;
		break;
	case RTAIL:
		if (!HFI1_CAP_IS_USET(DMA_RTAIL)) {
			/*
			 * If the memory allocation failed, the context alloc
			 * also would have failed, so we would never get here.
			 */
			ret = -EINVAL;
			goto done;
		}
		if (flags & VM_WRITE) {
			ret = -EPERM;
			goto done;
		}
		memaddr = uctxt->rcvhdrqtailaddr_phys;
		memlen = PAGE_SIZE;
		flags &= ~VM_MAYWRITE;
		break;
	case SUBCTXT_UREGS:
		memaddr = (u64)uctxt->subctxt_uregbase;
		memlen = PAGE_SIZE;
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	case SUBCTXT_RCV_HDRQ:
		memaddr = (u64)uctxt->subctxt_rcvhdr_base;
		memlen = uctxt->rcvhdrq_size * uctxt->subctxt_cnt;
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	case SUBCTXT_EGRBUF:
		memaddr = (u64)uctxt->subctxt_rcvegrbuf;
		memlen = uctxt->egrbufs.size * uctxt->subctxt_cnt;
		flags |= VM_IO | VM_DONTEXPAND;
		flags &= ~VM_MAYWRITE;
		vmf = 1;
		break;
	case SDMA_COMP: {
		struct hfi1_user_sdma_comp_q *cq = fd->cq;

		if (!cq) {
			ret = -EFAULT;
			goto done;
		}
		memaddr = (u64)cq->comps;
		memlen = PAGE_ALIGN(sizeof(*cq->comps) * cq->nentries);
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	}
	default:
		ret = -EINVAL;
		goto done;
	}

	if ((vma->vm_end - vma->vm_start) != memlen) {
		hfi1_cdbg(PROC, "%u:%u Memory size mismatch %lu:%lu",
			  uctxt->ctxt, fd->subctxt,
			  (vma->vm_end - vma->vm_start), memlen);
		ret = -EINVAL;
		goto done;
	}

	vma->vm_flags = flags;
	hfi1_cdbg(PROC,
		  "%u:%u type:%u io/vf:%d/%d, addr:0x%llx, len:%lu(%lu), flags:0x%lx\n",
		  ctxt, subctxt, type, mapio, vmf, memaddr, memlen,
		  vma->vm_end - vma->vm_start, vma->vm_flags);
	pfn = (unsigned long)(memaddr >> PAGE_SHIFT);
	if (vmf) {
		vma->vm_pgoff = pfn;
		vma->vm_ops = &vm_ops;
		ret = 0;
	} else if (mapio) {
		ret = io_remap_pfn_range(vma, vma->vm_start, pfn, memlen,
					 vma->vm_page_prot);
	} else {
		ret = remap_pfn_range(vma, vma->vm_start, pfn, memlen,
				      vma->vm_page_prot);
	}
done:
	return ret;
}
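
/*
 * Note on the three mapping strategies used above: chip resources
 * (PIO buffers, credit returns, user registers) live behind the PCI
 * BAR and go through io_remap_pfn_range() with mapio set; physically
 * contiguous DMA buffers (RcvHdr queue, RcvHdr tail) use
 * remap_pfn_range() directly; and vmalloc-backed memory (events page,
 * sub-context buffers, SDMA completions) sets vmf so its pages are
 * faulted in one at a time through vma_fault() below.
 */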
/*
 * Local (non-chip) user memory is not mapped right away but as it is
 * accessed by the user-level code.
 */
static int vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page;

	page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);
	vmf->page = page;

	return 0;
}
static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt)
{
	struct hfi1_ctxtdata *uctxt;
	unsigned pollflag;

	uctxt = ((struct hfi1_filedata *)fp->private_data)->uctxt;
	if (!uctxt)
		pollflag = POLLERR;
	else if (uctxt->poll_type == HFI1_POLL_TYPE_URGENT)
		pollflag = poll_urgent(fp, pt);
	else if (uctxt->poll_type == HFI1_POLL_TYPE_ANYRCV)
		pollflag = poll_next(fp, pt);
	else /* invalid */
		pollflag = POLLERR;

	return pollflag;
}
static int hfi1_file_close(struct inode *inode, struct file *fp)
{
	struct hfi1_filedata *fdata = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fdata->uctxt;
	struct hfi1_devdata *dd;
	unsigned long flags, *ev;

	fp->private_data = NULL;

	if (!uctxt)
		goto done;

	hfi1_cdbg(PROC, "freeing ctxt %u:%u", uctxt->ctxt, fdata->subctxt);
	dd = uctxt->dd;
	mutex_lock(&hfi1_mutex);

	flush_wc();
	/* drain user sdma queue */
	hfi1_user_sdma_free_queues(fdata);

	/* release the cpu */
	hfi1_put_proc_affinity(dd, fdata->rec_cpu_num);

	/*
	 * Clear any left over, unhandled events so the next process that
	 * gets this context doesn't get confused.
	 */
	ev = dd->events + ((uctxt->ctxt - dd->first_user_ctxt) *
			   HFI1_MAX_SHARED_CTXTS) + fdata->subctxt;
	*ev = 0;

	if (--uctxt->cnt) {
		uctxt->active_slaves &= ~(1 << fdata->subctxt);
		uctxt->subpid[fdata->subctxt] = 0;
		mutex_unlock(&hfi1_mutex);
		goto done;
	}

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	/*
	 * Disable receive context and interrupt available, reset all
	 * RcvCtxtCtrl bits to default values.
	 */
	hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
		     HFI1_RCVCTRL_TIDFLOW_DIS |
		     HFI1_RCVCTRL_INTRAVAIL_DIS |
		     HFI1_RCVCTRL_TAILUPD_DIS |
		     HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
		     HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
		     HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt->ctxt);
	/* Clear the context's J_KEY */
	hfi1_clear_ctxt_jkey(dd, uctxt->ctxt);
	/*
	 * Reset context integrity checks to default.
	 * (writes to CSRs probably belong in chip.c)
	 */
	write_kctxt_csr(dd, uctxt->sc->hw_context, SEND_CTXT_CHECK_ENABLE,
			hfi1_pkt_default_send_ctxt_mask(dd, uctxt->sc->type));
	sc_disable(uctxt->sc);
	uctxt->pid = 0;
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	dd->rcd[uctxt->ctxt] = NULL;
	uctxt->rcvwait_to = 0;
	uctxt->piowait_to = 0;
	uctxt->rcvnowait = 0;
	uctxt->pionowait = 0;
	uctxt->event_flags = 0;

	hfi1_user_exp_rcv_free(fdata);
	hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);

	hfi1_stats.sps_ctxts--;
	if (++dd->freectxts == dd->num_user_contexts)
		aspm_enable_all(dd);
	mutex_unlock(&hfi1_mutex);
	hfi1_free_ctxtdata(dd, uctxt);
done:
	kfree(fdata);
	return 0;
}
/*
 * Convert kernel *virtual* addresses to physical addresses.
 * This is used for vmalloc'ed addresses.
 */
static u64 kvirt_to_phys(void *addr)
{
	struct page *page;
	u64 paddr = 0;

	page = vmalloc_to_page(addr);
	if (page)
		paddr = page_to_pfn(page) << PAGE_SHIFT;

	return paddr;
}
static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo)
{
	int i_minor, ret = 0;
	unsigned swmajor, swminor, alg = HFI1_ALG_ACROSS;

	swmajor = uinfo->userversion >> 16;
	if (swmajor != HFI1_USER_SWMAJOR) {
		ret = -ENODEV;
		goto done;
	}

	swminor = uinfo->userversion & 0xffff;

	if (uinfo->hfi1_alg < HFI1_ALG_COUNT)
		alg = uinfo->hfi1_alg;

	mutex_lock(&hfi1_mutex);
	/* First, lets check if we need to setup a shared context? */
	if (uinfo->subctxt_cnt) {
		struct hfi1_filedata *fd = fp->private_data;

		ret = find_shared_ctxt(fp, uinfo);
		if (ret < 0)
			goto done_unlock;
		if (ret)
			fd->rec_cpu_num = hfi1_get_proc_affinity(
				fd->uctxt->dd, fd->uctxt->numa_id);
	}

	/*
	 * We execute the following block if we couldn't find a
	 * shared context or if context sharing is not required.
	 */
	if (!ret) {
		i_minor = iminor(file_inode(fp)) - HFI1_USER_MINOR_BASE;
		ret = get_user_context(fp, uinfo, i_minor - 1, alg);
	}
done_unlock:
	mutex_unlock(&hfi1_mutex);
done:
	return ret;
}
/* return true if the device is available for general use */
static int usable_device(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd = dd->pport;

	return driver_lstate(ppd) == IB_PORT_ACTIVE;
}
static int get_user_context(struct file *fp, struct hfi1_user_info *uinfo,
			    int devno, unsigned alg)
{
	struct hfi1_devdata *dd = NULL;
	int ret = 0, devmax, npresent, nup, dev;

	devmax = hfi1_count_units(&npresent, &nup);
	if (!npresent) {
		ret = -ENXIO;
		goto done;
	}
	if (!nup) {
		ret = -ENETDOWN;
		goto done;
	}
	if (devno >= 0) {
		dd = hfi1_lookup(devno);
		if (!dd)
			ret = -ENODEV;
		else if (!dd->freectxts)
			ret = -EBUSY;
	} else {
		struct hfi1_devdata *pdd;

		if (alg == HFI1_ALG_ACROSS) {
			unsigned free = 0U;

			for (dev = 0; dev < devmax; dev++) {
				pdd = hfi1_lookup(dev);
				if (!pdd)
					continue;
				if (!usable_device(pdd))
					continue;
				if (pdd->freectxts &&
				    pdd->freectxts > free) {
					dd = pdd;
					free = pdd->freectxts;
				}
			}
		} else {
			for (dev = 0; dev < devmax; dev++) {
				pdd = hfi1_lookup(dev);
				if (!pdd)
					continue;
				if (!usable_device(pdd))
					continue;
				if (pdd->freectxts) {
					dd = pdd;
					break;
				}
			}
		}
		if (!dd)
			ret = -EBUSY;
	}
done:
	return ret ? ret : allocate_ctxt(fp, dd, uinfo);
}
static int find_shared_ctxt(struct file *fp,
			    const struct hfi1_user_info *uinfo)
{
	int devmax, ndev, i;
	int ret = 0;
	struct hfi1_filedata *fd = fp->private_data;

	devmax = hfi1_count_units(NULL, NULL);

	for (ndev = 0; ndev < devmax; ndev++) {
		struct hfi1_devdata *dd = hfi1_lookup(ndev);

		if (!(dd && (dd->flags & HFI1_PRESENT) && dd->kregbase))
			continue;
		for (i = dd->first_user_ctxt; i < dd->num_rcv_contexts; i++) {
			struct hfi1_ctxtdata *uctxt = dd->rcd[i];

			/* Skip ctxts which are not yet open */
			if (!uctxt || !uctxt->cnt)
				continue;
			/* Skip ctxt if it doesn't match the requested one */
			if (memcmp(uctxt->uuid, uinfo->uuid,
				   sizeof(uctxt->uuid)) ||
			    uctxt->jkey != generate_jkey(current_uid()) ||
			    uctxt->subctxt_id != uinfo->subctxt_id ||
			    uctxt->subctxt_cnt != uinfo->subctxt_cnt)
				continue;

			/* Verify the sharing process matches the master */
			if (uctxt->userversion != uinfo->userversion ||
			    uctxt->cnt >= uctxt->subctxt_cnt) {
				ret = -EINVAL;
				goto done;
			}
			fd->uctxt = uctxt;
			fd->subctxt = uctxt->cnt++;
			uctxt->subpid[fd->subctxt] = current->pid;
			uctxt->active_slaves |= 1 << fd->subctxt;
			ret = 1;
			goto done;
		}
	}

done:
	return ret;
}
static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd,
			 struct hfi1_user_info *uinfo)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt;
	unsigned ctxt;
	int ret, numa;

	if (dd->flags & HFI1_FROZEN) {
		/*
		 * Pick an error that is unique from all other errors
		 * that are returned so the user process knows that
		 * it tried to allocate while the SPC was frozen. It
		 * should be able to retry with success in a short
		 * while.
		 */
		return -EIO;
	}

	for (ctxt = dd->first_user_ctxt; ctxt < dd->num_rcv_contexts; ctxt++)
		if (!dd->rcd[ctxt])
			break;

	if (ctxt == dd->num_rcv_contexts)
		return -EBUSY;

	fd->rec_cpu_num = hfi1_get_proc_affinity(dd, -1);
	if (fd->rec_cpu_num != -1)
		numa = cpu_to_node(fd->rec_cpu_num);
	else
		numa = numa_node_id();
	uctxt = hfi1_create_ctxtdata(dd->pport, ctxt, numa);
	if (!uctxt) {
		dd_dev_err(dd,
			   "Unable to allocate ctxtdata memory, failing open\n");
		return -ENOMEM;
	}
	hfi1_cdbg(PROC, "[%u:%u] pid %u assigned to CPU %d (NUMA %u)",
		  uctxt->ctxt, fd->subctxt, current->pid, fd->rec_cpu_num,
		  uctxt->numa_id);

	/*
	 * Allocate and enable a PIO send context.
	 */
	uctxt->sc = sc_alloc(dd, SC_USER, uctxt->rcvhdrqentsize,
			     uctxt->dd->node);
	if (!uctxt->sc)
		return -ENOMEM;

	hfi1_cdbg(PROC, "allocated send context %u(%u)\n", uctxt->sc->sw_index,
		  uctxt->sc->hw_context);
	ret = sc_enable(uctxt->sc);
	if (ret)
		return ret;
	/*
	 * Setup shared context resources if the user-level has requested
	 * shared contexts and this is the 'master' process.
	 * This has to be done here so the rest of the sub-contexts find the
	 * proper master.
	 */
	if (uinfo->subctxt_cnt && !fd->subctxt) {
		ret = init_subctxts(uctxt, uinfo);
		/*
		 * On error, we don't need to disable and de-allocate the
		 * send context because it will be done during file close.
		 */
		if (ret)
			return ret;
	}
	uctxt->userversion = uinfo->userversion;
	uctxt->pid = current->pid;
	uctxt->flags = HFI1_CAP_UGET(MASK);
	init_waitqueue_head(&uctxt->wait);
	strlcpy(uctxt->comm, current->comm, sizeof(uctxt->comm));
	memcpy(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid));
	uctxt->jkey = generate_jkey(current_uid());
	INIT_LIST_HEAD(&uctxt->sdma_queues);
	spin_lock_init(&uctxt->sdma_qlock);
	hfi1_stats.sps_ctxts++;
	/*
	 * Disable ASPM when there are open user/PSM contexts to avoid
	 * issues with ASPM L1 exit latency.
	 */
	if (dd->freectxts-- == dd->num_user_contexts)
		aspm_disable_all(dd);
	fd->uctxt = uctxt;

	return 0;
}
static int init_subctxts(struct hfi1_ctxtdata *uctxt,
			 const struct hfi1_user_info *uinfo)
{
	unsigned num_subctxts;

	num_subctxts = uinfo->subctxt_cnt;
	if (num_subctxts > HFI1_MAX_SHARED_CTXTS)
		return -EINVAL;

	uctxt->subctxt_cnt = uinfo->subctxt_cnt;
	uctxt->subctxt_id = uinfo->subctxt_id;
	uctxt->active_slaves = 1;
	uctxt->redirect_seq_cnt = 1;
	set_bit(HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags);

	return 0;
}
static int setup_subctxt(struct hfi1_ctxtdata *uctxt)
{
	int ret = 0;
	unsigned num_subctxts = uctxt->subctxt_cnt;

	uctxt->subctxt_uregbase = vmalloc_user(PAGE_SIZE);
	if (!uctxt->subctxt_uregbase) {
		ret = -ENOMEM;
		goto bail;
	}
	/* We can take the size of the RcvHdr Queue from the master */
	uctxt->subctxt_rcvhdr_base = vmalloc_user(uctxt->rcvhdrq_size *
						  num_subctxts);
	if (!uctxt->subctxt_rcvhdr_base) {
		ret = -ENOMEM;
		goto bail_ureg;
	}

	uctxt->subctxt_rcvegrbuf = vmalloc_user(uctxt->egrbufs.size *
						num_subctxts);
	if (!uctxt->subctxt_rcvegrbuf) {
		ret = -ENOMEM;
		goto bail_rhdr;
	}
	goto bail;
bail_rhdr:
	vfree(uctxt->subctxt_rcvhdr_base);
bail_ureg:
	vfree(uctxt->subctxt_uregbase);
	uctxt->subctxt_uregbase = NULL;
bail:
	return ret;
}
static int user_init(struct file *fp)
{
	int ret = 0;
	unsigned int rcvctrl_ops = 0;
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;

	/* make sure that the context has already been setup */
	if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags)) {
		ret = -EFAULT;
		goto done;
	}

	/*
	 * Subctxts don't need to initialize anything since master
	 * has done it.
	 */
	if (fd->subctxt) {
		ret = wait_event_interruptible(uctxt->wait, !test_bit(
					       HFI1_CTXT_MASTER_UNINIT,
					       &uctxt->event_flags));
		if (ret)
			goto done;
		goto expected;
	}

	/* initialize poll variables... */
	uctxt->urgent = 0;
	uctxt->urgent_poll = 0;

	/*
	 * Now enable the ctxt for receive.
	 * For chips that are set to DMA the tail register to memory
	 * when they change (and when the update bit transitions from
	 * 0 to 1), we turn it off and then back on.
	 * This will (very briefly) affect any other open ctxts, but the
	 * duration is very short, and therefore isn't an issue. We
	 * explicitly set the in-memory tail copy to 0 beforehand, so we
	 * don't have to wait to be sure the DMA update has happened
	 * (chip resets head/tail to 0 on transition to enable).
	 */
	if (uctxt->rcvhdrtail_kvaddr)
		clear_rcvhdrtail(uctxt);

	/* Setup J_KEY before enabling the context */
	hfi1_set_ctxt_jkey(uctxt->dd, uctxt->ctxt, uctxt->jkey);

	rcvctrl_ops = HFI1_RCVCTRL_CTXT_ENB;
	if (HFI1_CAP_KGET_MASK(uctxt->flags, HDRSUPP))
		rcvctrl_ops |= HFI1_RCVCTRL_TIDFLOW_ENB;
	/*
	 * Ignore the bit in the flags for now until proper
	 * support for multiple packet per rcv array entry is
	 * added.
	 */
	if (!HFI1_CAP_KGET_MASK(uctxt->flags, MULTI_PKT_EGR))
		rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
	if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_EGR_FULL))
		rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
	if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
		rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
	/*
	 * The RcvCtxtCtrl.TailUpd bit has to be explicitly written.
	 * We can't rely on the correct value to be set from prior
	 * uses of the chip or ctxt. Therefore, add the rcvctrl op
	 * for both enable and disable.
	 */
	if (HFI1_CAP_KGET_MASK(uctxt->flags, DMA_RTAIL))
		rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
	else
		rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_DIS;
	hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt->ctxt);

	/* Notify any waiting slaves */
	if (uctxt->subctxt_cnt) {
		clear_bit(HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags);
		wake_up(&uctxt->wait);
	}

expected:
	/*
	 * Expected receive has to be setup for all processes (including
	 * shared contexts). However, it has to be done after the master
	 * context has been fully configured as it depends on the
	 * eager/expected split of the RcvArray entries.
	 * Setting it up here ensures that the subcontexts will be waiting
	 * (due to the above wait_event_interruptible()) until the master
	 * is setup.
	 */
	ret = hfi1_user_exp_rcv_init(fp);
done:
	return ret;
}
static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len)
{
	struct hfi1_ctxt_info cinfo;
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	int ret = 0;

	memset(&cinfo, 0, sizeof(cinfo));
	ret = hfi1_get_base_kinfo(uctxt, &cinfo);
	if (ret < 0)
		goto done;
	cinfo.num_active = hfi1_count_active_units();
	cinfo.unit = uctxt->dd->unit;
	cinfo.ctxt = uctxt->ctxt;
	cinfo.subctxt = fd->subctxt;
	cinfo.rcvtids = roundup(uctxt->egrbufs.alloced,
				uctxt->dd->rcv_entries.group_size) +
		uctxt->expected_count;
	cinfo.credits = uctxt->sc->credits;
	cinfo.numa_node = uctxt->numa_id;
	cinfo.rec_cpu = fd->rec_cpu_num;
	cinfo.send_ctxt = uctxt->sc->hw_context;

	cinfo.egrtids = uctxt->egrbufs.alloced;
	cinfo.rcvhdrq_cnt = uctxt->rcvhdrq_cnt;
	cinfo.rcvhdrq_entsize = uctxt->rcvhdrqentsize << 2;
	cinfo.sdma_ring_size = fd->cq->nentries;
	cinfo.rcvegr_size = uctxt->egrbufs.rcvtid_size;

	trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, fd->subctxt, cinfo);
	if (copy_to_user(ubase, &cinfo, sizeof(cinfo)))
		ret = -EFAULT;
done:
	return ret;
}
static int setup_ctxt(struct file *fp)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	int ret = 0;

	/*
	 * Context should be set up only once (including allocation and
	 * programming of eager buffers). This is done if context sharing
	 * is not requested or by the master process.
	 */
	if (!uctxt->subctxt_cnt || !fd->subctxt) {
		ret = hfi1_init_ctxt(uctxt->sc);
		if (ret)
			goto done;

		/* Now allocate the RcvHdr queue and eager buffers. */
		ret = hfi1_create_rcvhdrq(dd, uctxt);
		if (ret)
			goto done;
		ret = hfi1_setup_eagerbufs(uctxt);
		if (ret)
			goto done;
		if (uctxt->subctxt_cnt && !fd->subctxt) {
			ret = setup_subctxt(uctxt);
			if (ret)
				goto done;
		}
	}
	ret = hfi1_user_sdma_alloc_queues(uctxt, fp);
	if (ret)
		goto done;

	set_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags);
done:
	return ret;
}
static int get_base_info(struct file *fp, void __user *ubase, __u32 len)
{
	struct hfi1_base_info binfo;
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	ssize_t sz;
	unsigned offset;
	int ret = 0;

	trace_hfi1_uctxtdata(uctxt->dd, uctxt);

	memset(&binfo, 0, sizeof(binfo));
	binfo.hw_version = dd->revision;
	binfo.sw_version = HFI1_KERN_SWVERSION;
	binfo.bthqp = kdeth_qp;
	binfo.jkey = uctxt->jkey;
	/*
	 * If more than 64 contexts are enabled the allocated credit
	 * return will span two or three contiguous pages. Since we only
	 * map the page containing the context's credit return address,
	 * we need to calculate the offset in the proper page.
	 */
	offset = ((u64)uctxt->sc->hw_free -
		  (u64)dd->cr_base[uctxt->numa_id].va) % PAGE_SIZE;
	binfo.sc_credits_addr = HFI1_MMAP_TOKEN(PIO_CRED, uctxt->ctxt,
						fd->subctxt, offset);
	binfo.pio_bufbase = HFI1_MMAP_TOKEN(PIO_BUFS, uctxt->ctxt,
					    fd->subctxt,
					    uctxt->sc->base_addr);
	binfo.pio_bufbase_sop = HFI1_MMAP_TOKEN(PIO_BUFS_SOP,
						uctxt->ctxt,
						fd->subctxt,
						uctxt->sc->base_addr);
	binfo.rcvhdr_bufbase = HFI1_MMAP_TOKEN(RCV_HDRQ, uctxt->ctxt,
					       fd->subctxt,
					       uctxt->rcvhdrq);
	binfo.rcvegr_bufbase = HFI1_MMAP_TOKEN(RCV_EGRBUF, uctxt->ctxt,
					       fd->subctxt,
					       uctxt->egrbufs.rcvtids[0].phys);
	binfo.sdma_comp_bufbase = HFI1_MMAP_TOKEN(SDMA_COMP, uctxt->ctxt,
						  fd->subctxt, 0);
	/*
	 * user regs are at
	 * (RXE_PER_CONTEXT_USER + (ctxt * RXE_PER_CONTEXT_SIZE))
	 */
	binfo.user_regbase = HFI1_MMAP_TOKEN(UREGS, uctxt->ctxt,
					     fd->subctxt, 0);
	offset = offset_in_page((((uctxt->ctxt - dd->first_user_ctxt) *
				  HFI1_MAX_SHARED_CTXTS) + fd->subctxt) *
				sizeof(*dd->events));
	binfo.events_bufbase = HFI1_MMAP_TOKEN(EVENTS, uctxt->ctxt,
					       fd->subctxt,
					       offset);
	binfo.status_bufbase = HFI1_MMAP_TOKEN(STATUS, uctxt->ctxt,
					       fd->subctxt,
					       dd->status);
	if (HFI1_CAP_IS_USET(DMA_RTAIL))
		binfo.rcvhdrtail_base = HFI1_MMAP_TOKEN(RTAIL, uctxt->ctxt,
							fd->subctxt, 0);
	if (uctxt->subctxt_cnt) {
		binfo.subctxt_uregbase = HFI1_MMAP_TOKEN(SUBCTXT_UREGS,
							 uctxt->ctxt,
							 fd->subctxt, 0);
		binfo.subctxt_rcvhdrbuf = HFI1_MMAP_TOKEN(SUBCTXT_RCV_HDRQ,
							  uctxt->ctxt,
							  fd->subctxt, 0);
		binfo.subctxt_rcvegrbuf = HFI1_MMAP_TOKEN(SUBCTXT_EGRBUF,
							  uctxt->ctxt,
							  fd->subctxt, 0);
	}
	sz = (len < sizeof(binfo)) ? len : sizeof(binfo);
	if (copy_to_user(ubase, &binfo, sz))
		ret = -EFAULT;
	return ret;
}
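
/*
 * Illustrative userspace sketch, not part of the driver: each token
 * placed in struct hfi1_base_info above is meant to be used as the
 * mmap(2) offset for the corresponding resource, e.g.:
 *
 *	void *events = mmap(NULL, page_size, PROT_READ, MAP_SHARED,
 *			    fd, binfo.events_bufbase & ~(__u64)0xfff);
 *
 * where masking off the low 12 bits (shown as an assumption for
 * clarity; they carry the in-page offset) yields a page-aligned offset
 * that hfi1_file_mmap() decodes and validates against the caller's
 * context and sub-context.
 */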
static unsigned int poll_urgent(struct file *fp,
				struct poll_table_struct *pt)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned pollflag;

	poll_wait(fp, &uctxt->wait, pt);

	spin_lock_irq(&dd->uctxt_lock);
	if (uctxt->urgent != uctxt->urgent_poll) {
		pollflag = POLLIN | POLLRDNORM;
		uctxt->urgent_poll = uctxt->urgent;
	} else {
		pollflag = 0;
		set_bit(HFI1_CTXT_WAITING_URG, &uctxt->event_flags);
	}
	spin_unlock_irq(&dd->uctxt_lock);

	return pollflag;
}
static unsigned int poll_next(struct file *fp,
			      struct poll_table_struct *pt)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned pollflag;

	poll_wait(fp, &uctxt->wait, pt);

	spin_lock_irq(&dd->uctxt_lock);
	if (hdrqempty(uctxt)) {
		set_bit(HFI1_CTXT_WAITING_RCV, &uctxt->event_flags);
		hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_ENB, uctxt->ctxt);
		pollflag = 0;
	} else {
		pollflag = POLLIN | POLLRDNORM;
	}
	spin_unlock_irq(&dd->uctxt_lock);

	return pollflag;
}
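
/*
 * Illustrative userspace sketch, not part of the driver: after
 * selecting HFI1_POLL_TYPE_ANYRCV with the HFI1_CMD_POLL_TYPE command,
 * a receiver can block until the next packet arrives:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);
 *
 * poll_next() above re-arms the RcvAvail interrupt only when the
 * header queue is empty, so the interrupt stays off while userspace
 * keeps up with the queue.
 */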
/*
 * Find all user contexts in use, and set the specified bit in their
 * event mask.
 * See also find_ctxt() for a similar use, that is specific to send buffers.
 */
int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit)
{
	struct hfi1_ctxtdata *uctxt;
	struct hfi1_devdata *dd = ppd->dd;
	unsigned ctxt;
	int ret = 0;
	unsigned long flags;

	if (!dd->events) {
		ret = -EINVAL;
		goto done;
	}

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	for (ctxt = dd->first_user_ctxt; ctxt < dd->num_rcv_contexts;
	     ctxt++) {
		uctxt = dd->rcd[ctxt];
		if (uctxt) {
			unsigned long *evs = dd->events +
				(uctxt->ctxt - dd->first_user_ctxt) *
				HFI1_MAX_SHARED_CTXTS;
			int i;
			/*
			 * subctxt_cnt is 0 if not shared, so do base
			 * separately, first, then remaining subctxt, if any
			 */
			set_bit(evtbit, evs);
			for (i = 1; i < uctxt->subctxt_cnt; i++)
				set_bit(evtbit, evs + i);
		}
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);
done:
	return ret;
}
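
/*
 * Layout note, derived from the index arithmetic above: dd->events is
 * a flat array of per-process event masks indexed by
 * (ctxt - first_user_ctxt) * HFI1_MAX_SHARED_CTXTS + subctxt, i.e.
 * each user context owns HFI1_MAX_SHARED_CTXTS consecutive slots, one
 * per potential sub-context. The same arithmetic appears in
 * hfi1_file_close() and user_event_ack(), and the EVENTS mmap token
 * exposes the page holding these slots to user level.
 */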
/**
 * manage_rcvq - manage a context's receive queue
 * @uctxt: the context
 * @subctxt: the sub-context
 * @start_stop: action to carry out
 *
 * start_stop == 0 disables receive on the context, for use in queue
 * overflow conditions. start_stop == 1 re-enables, to be used to
 * re-init the software copy of the head register.
 */
static int manage_rcvq(struct hfi1_ctxtdata *uctxt, unsigned subctxt,
		       int start_stop)
{
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned int rcvctrl_op;

	if (subctxt)
		goto bail;
	/* atomically clear receive enable ctxt. */
	if (start_stop) {
		/*
		 * On enable, force in-memory copy of the tail register to
		 * 0, so that protocol code doesn't have to worry about
		 * whether or not the chip has yet updated the in-memory
		 * copy or not on return from the system call. The chip
		 * always resets its tail register back to 0 on a
		 * transition from disabled to enabled.
		 */
		if (uctxt->rcvhdrtail_kvaddr)
			clear_rcvhdrtail(uctxt);
		rcvctrl_op = HFI1_RCVCTRL_CTXT_ENB;
	} else {
		rcvctrl_op = HFI1_RCVCTRL_CTXT_DIS;
	}
	hfi1_rcvctrl(dd, rcvctrl_op, uctxt->ctxt);
	/* always; new head should be equal to new tail; see above */
bail:
	return 0;
}
/*
 * clear the event notifier events for this context.
 * User process then performs actions appropriate to bit having been
 * set, if desired, and checks again in future.
 */
static int user_event_ack(struct hfi1_ctxtdata *uctxt, int subctxt,
			  unsigned long events)
{
	int i;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned long *evs;

	if (!dd->events)
		return 0;

	evs = dd->events + ((uctxt->ctxt - dd->first_user_ctxt) *
			    HFI1_MAX_SHARED_CTXTS) + subctxt;

	for (i = 0; i <= _HFI1_MAX_EVENT_BIT; i++) {
		if (!test_bit(i, &events))
			continue;
		clear_bit(i, evs);
	}
	return 0;
}
static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned subctxt,
			 u16 pkey)
{
	int ret = -ENOENT, i, intable = 0;
	struct hfi1_pportdata *ppd = uctxt->ppd;
	struct hfi1_devdata *dd = uctxt->dd;

	if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY) {
		ret = -EINVAL;
		goto done;
	}

	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++)
		if (pkey == ppd->pkeys[i]) {
			intable = 1;
			break;
		}

	if (intable)
		ret = hfi1_set_ctxt_pkey(dd, uctxt->ctxt, pkey);
done:
	return ret;
}
static int ui_open(struct inode *inode, struct file *filp)
{
	struct hfi1_devdata *dd;

	dd = container_of(inode->i_cdev, struct hfi1_devdata, ui_cdev);
	filp->private_data = dd; /* for other methods */
	return 0;
}
static int ui_release(struct inode *inode, struct file *filp)
{
	/* nothing to do */
	return 0;
}
static loff_t ui_lseek(struct file *filp, loff_t offset, int whence)
{
	struct hfi1_devdata *dd = filp->private_data;

	switch (whence) {
	case SEEK_SET:
		break;
	case SEEK_CUR:
		offset += filp->f_pos;
		break;
	case SEEK_END:
		offset = ((dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE) -
			offset;
		break;
	default:
		return -EINVAL;
	}

	if (offset < 0)
		return -EINVAL;

	if (offset >= (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE)
		return -EINVAL;

	filp->f_pos = offset;

	return filp->f_pos;
}
/* NOTE: assumes unsigned long is 8 bytes */
static ssize_t ui_read(struct file *filp, char __user *buf, size_t count,
		       loff_t *f_pos)
{
	struct hfi1_devdata *dd = filp->private_data;
	void __iomem *base = dd->kregbase;
	unsigned long total, csr_off,
		barlen = (dd->kregend - dd->kregbase);
	u64 data;

	/* only read 8 byte quantities */
	if ((count % 8) != 0)
		return -EINVAL;
	/* offset must be 8-byte aligned */
	if ((*f_pos % 8) != 0)
		return -EINVAL;
	/* destination buffer must be 8-byte aligned */
	if ((unsigned long)buf % 8 != 0)
		return -EINVAL;
	/* must be in range */
	if (*f_pos + count > (barlen + DC8051_DATA_MEM_SIZE))
		return -EINVAL;
	/* only set the base if we are not starting past the BAR */
	if (*f_pos < barlen)
		base += *f_pos;
	csr_off = *f_pos;
	for (total = 0; total < count; total += 8, csr_off += 8) {
		/* accessing LCB CSRs requires more checks */
		if (is_lcb_offset(csr_off)) {
			if (read_lcb_csr(dd, csr_off, (u64 *)&data))
				break; /* failed */
		}
		/*
		 * Cannot read ASIC GPIO/QSFP* clear and force CSRs without a
		 * false parity error. Avoid the whole issue by not reading
		 * them. These registers are defined as having a read value
		 * of 0.
		 */
		else if (csr_off == ASIC_GPIO_CLEAR ||
			 csr_off == ASIC_GPIO_FORCE ||
			 csr_off == ASIC_QSFP1_CLEAR ||
			 csr_off == ASIC_QSFP1_FORCE ||
			 csr_off == ASIC_QSFP2_CLEAR ||
			 csr_off == ASIC_QSFP2_FORCE)
			data = 0;
		else if (csr_off >= barlen) {
			/*
			 * read_8051_data can read more than just 8 bytes at
			 * a time. However, folding this into the loop and
			 * handling the reads in 8 byte increments allows us
			 * to smoothly transition from chip memory to 8051
			 * memory.
			 */
			if (read_8051_data(dd,
					   (u32)(csr_off - barlen),
					   sizeof(data), &data))
				break; /* failed */
		} else {
			data = readq(base + total);
		}
		if (put_user(data, (unsigned long __user *)(buf + total)))
			break;
	}
	*f_pos += total;
	return total;
}
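
/*
 * Illustrative userspace sketch, not part of the driver: the UI device
 * presents the chip CSR space followed by 8051 data memory as a flat
 * file readable only in aligned 8-byte quantities:
 *
 *	__u64 val;
 *	pread(ui_fd, &val, sizeof(val), csr_offset);
 *
 * Both csr_offset and the destination buffer must be 8-byte aligned or
 * the read fails with -EINVAL.
 */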
/* NOTE: assumes unsigned long is 8 bytes */
static ssize_t ui_write(struct file *filp, const char __user *buf,
			size_t count, loff_t *f_pos)
{
	struct hfi1_devdata *dd = filp->private_data;
	void __iomem *base;
	unsigned long total, data, csr_off;
	int in_lcb;

	/* only write 8 byte quantities */
	if ((count % 8) != 0)
		return -EINVAL;
	/* offset must be 8-byte aligned */
	if ((*f_pos % 8) != 0)
		return -EINVAL;
	/* source buffer must be 8-byte aligned */
	if ((unsigned long)buf % 8 != 0)
		return -EINVAL;
	/* must be in range */
	if (*f_pos + count > dd->kregend - dd->kregbase)
		return -EINVAL;

	base = (void __iomem *)dd->kregbase + *f_pos;
	csr_off = *f_pos;
	in_lcb = 0;
	for (total = 0; total < count; total += 8, csr_off += 8) {
		if (get_user(data, (unsigned long __user *)(buf + total)))
			break;
		/* accessing LCB CSRs requires a special procedure */
		if (is_lcb_offset(csr_off)) {
			if (!in_lcb) {
				int ret = acquire_lcb_access(dd, 1);

				if (ret)
					break;
				in_lcb = 1;
			}
		} else {
			if (in_lcb) {
				release_lcb_access(dd, 1);
				in_lcb = 0;
			}
		}
		writeq(data, base + total);
	}
	if (in_lcb)
		release_lcb_access(dd, 1);
	*f_pos += total;
	return total;
}
static const struct file_operations ui_file_ops = {
	.owner = THIS_MODULE,
	.llseek = ui_lseek,
	.read = ui_read,
	.write = ui_write,
	.open = ui_open,
	.release = ui_release,
};
#define UI_OFFSET 192	/* device minor offset for UI devices */
static int create_ui = 1;
static struct cdev wildcard_cdev;
static struct device *wildcard_device;

static atomic_t user_count = ATOMIC_INIT(0);
static void user_remove(struct hfi1_devdata *dd)
{
	if (atomic_dec_return(&user_count) == 0)
		hfi1_cdev_cleanup(&wildcard_cdev, &wildcard_device);

	hfi1_cdev_cleanup(&dd->user_cdev, &dd->user_device);
	hfi1_cdev_cleanup(&dd->ui_cdev, &dd->ui_device);
}
static int user_add(struct hfi1_devdata *dd)
{
	char name[10];
	int ret;

	if (atomic_inc_return(&user_count) == 1) {
		ret = hfi1_cdev_init(0, class_name(), &hfi1_file_ops,
				     &wildcard_cdev, &wildcard_device,
				     true);
		if (ret)
			goto done;
	}

	snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit);
	ret = hfi1_cdev_init(dd->unit + 1, name, &hfi1_file_ops,
			     &dd->user_cdev, &dd->user_device,
			     true);
	if (ret)
		goto done;

	if (create_ui) {
		snprintf(name, sizeof(name),
			 "%s_ui%d", class_name(), dd->unit);
		ret = hfi1_cdev_init(dd->unit + UI_OFFSET, name, &ui_file_ops,
				     &dd->ui_cdev, &dd->ui_device,
				     false);
		if (ret)
			goto done;
	}

	return 0;
done:
	user_remove(dd);
	return ret;
}
/*
 * Create per-unit files in /dev
 */
int hfi1_device_create(struct hfi1_devdata *dd)
{
	int r, ret;

	r = user_add(dd);
	ret = hfi1_diag_add(dd);
	if (r && !ret)
		ret = r;
	return ret;
}
/*
 * Remove per-unit files in /dev
 * void, core kernel returns no errors for this stuff
 */
void hfi1_device_remove(struct hfi1_devdata *dd)
{
	user_remove(dd);
	hfi1_diag_remove(dd);
}