staging/hfi1: Add function stubs for TID caching
drivers/staging/rdma/hfi1/user_exp_rcv.c
/*
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
/* Include what is used directly: rbtree and MMU notifier APIs. */
#include <linux/mmu_notifier.h>
#include <linux/rbtree.h>
#include <asm/page.h>

#include "user_exp_rcv.h"
#include "trace.h"

struct mmu_rb_node {
	struct rb_node rbnode;	/* linkage in the RB tree */
	unsigned long virt;	/* user virtual address of the buffer */
	unsigned long phys;	/* physical address of the buffer */
	unsigned long len;	/* buffer length in bytes */
	struct tid_group *grp;	/* TID group the rcventry belongs to */
	u32 rcventry;		/* receive array entry index */
	dma_addr_t dma_addr;	/* bus address for hardware access */
	bool freed;		/* set once the entry has been freed */
	unsigned int npages;	/* number of pages in pages[] */
	struct page *pages[];	/* pinned pages, allocated inline */
};

enum mmu_call_types {
	MMU_INVALIDATE_PAGE = 0,
	MMU_INVALIDATE_RANGE = 1
};

static const char * const mmu_types[] = {
	"PAGE",
	"RANGE"
};

static inline int mmu_addr_cmp(struct mmu_rb_node *, unsigned long,
			       unsigned long);
static struct mmu_rb_node *mmu_rb_search_by_addr(struct rb_root *,
						 unsigned long) __maybe_unused;
static inline struct mmu_rb_node *mmu_rb_search_by_entry(struct rb_root *,
							 u32);
static int mmu_rb_insert_by_addr(struct rb_root *,
				 struct mmu_rb_node *) __maybe_unused;
static int mmu_rb_insert_by_entry(struct rb_root *,
				  struct mmu_rb_node *) __maybe_unused;
static void mmu_notifier_mem_invalidate(struct mmu_notifier *,
					unsigned long, unsigned long,
					enum mmu_call_types);
static inline void mmu_notifier_page(struct mmu_notifier *, struct mm_struct *,
				     unsigned long);
static inline void mmu_notifier_range_start(struct mmu_notifier *,
					    struct mm_struct *,
					    unsigned long, unsigned long);

static struct mmu_notifier_ops __maybe_unused mn_opts = {
	.invalidate_page = mmu_notifier_page,
	.invalidate_range_start = mmu_notifier_range_start,
};

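/*
 * Illustrative sketch only, not part of the original stub set: once
 * the callbacks above do real work, mn_opts would be attached to a
 * process' address space along these lines. The helper name and the
 * mm parameter are assumptions for the example, not the driver's
 * eventual API.
 */
static int __maybe_unused mmu_notifier_register_sketch(struct mmu_notifier *mn,
						       struct mm_struct *mm)
{
	mn->ops = &mn_opts;
	/* mmu_notifier_register() takes mmap_sem itself and may sleep. */
	return mmu_notifier_register(mn, mm);
}
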
/*
 * Initialize context and file private data needed for Expected
 * receive caching. This needs to be done after the context has
 * been configured with the eager/expected RcvEntry counts.
 *
 * Like the rest of the hfi1_user_exp_rcv_* entry points below, this
 * is a stub for now: it fails with -EINVAL until the actual TID
 * caching logic is added by later patches in this series.
 */
int hfi1_user_exp_rcv_init(struct file *fp)
{
	return -EINVAL;
}

int hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
{
	return -EINVAL;
}

int hfi1_user_exp_rcv_setup(struct file *fp, struct hfi1_tid_info *tinfo)
{
	return -EINVAL;
}

int hfi1_user_exp_rcv_clear(struct file *fp, struct hfi1_tid_info *tinfo)
{
	return -EINVAL;
}

int hfi1_user_exp_rcv_invalid(struct file *fp, struct hfi1_tid_info *tinfo)
{
	return -EINVAL;
}

static inline void mmu_notifier_page(struct mmu_notifier *mn,
				     struct mm_struct *mm, unsigned long addr)
{
	mmu_notifier_mem_invalidate(mn, addr, addr + PAGE_SIZE,
				    MMU_INVALIDATE_PAGE);
}

static inline void mmu_notifier_range_start(struct mmu_notifier *mn,
					    struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end)
{
	mmu_notifier_mem_invalidate(mn, start, end, MMU_INVALIDATE_RANGE);
}

static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn,
					unsigned long start, unsigned long end,
					enum mmu_call_types type)
{
	/* Stub for now */
}

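/*
 * Illustrative sketch only: one plausible shape for the invalidation
 * above, assuming the cached buffers end up in an rb_root keyed by
 * virtual address. The 'root' parameter is hypothetical; the real
 * implementation lands in later patches.
 */
static void __maybe_unused mmu_mem_invalidate_sketch(struct rb_root *root,
						     unsigned long start,
						     unsigned long end)
{
	unsigned long addr = start;

	while (addr < end) {
		struct mmu_rb_node *node = mmu_rb_search_by_addr(root, addr);

		if (!node) {
			addr += PAGE_SIZE;
			continue;
		}
		/* Skip past the buffer, then drop it from the cache. */
		addr = node->virt + node->len;
		rb_erase(&node->rbnode, root);
		/* Unpinning pages and freeing the node is out of scope here. */
	}
}
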
static inline int mmu_addr_cmp(struct mmu_rb_node *node, unsigned long addr,
			       unsigned long len)
{
	if ((addr + len) <= node->virt)
		return -1;
	else if (addr >= node->virt && addr < (node->virt + node->len))
		return 0;
	else
		return 1;
}

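/*
 * Worked example for mmu_addr_cmp(): with node->virt = 0x2000 and
 * node->len = 0x1000, the node spans [0x2000, 0x3000), so:
 *
 *   mmu_addr_cmp(node, 0x1000, 0x1000) -> -1 (range ends at/below node)
 *   mmu_addr_cmp(node, 0x2800, 0x1000) ->  0 (addr falls inside node)
 *   mmu_addr_cmp(node, 0x3000, 0x1000) ->  1 (addr is past the node)
 *
 * Note that only 'addr' is tested for containment: a range straddling
 * node->virt from below compares as "greater". That is safe here only
 * because buffers in the tree are guaranteed not to overlap.
 */
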
static inline int mmu_entry_cmp(struct mmu_rb_node *node, u32 entry)
{
	if (entry < node->rcventry)
		return -1;
	else if (entry > node->rcventry)
		return 1;
	else
		return 0;
}

static struct mmu_rb_node *mmu_rb_search_by_addr(struct rb_root *root,
						 unsigned long addr)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct mmu_rb_node *mnode =
			container_of(node, struct mmu_rb_node, rbnode);
		/*
		 * When searching, use at least one page length for size. The
		 * MMU notifier will not give us anything less than that. We
		 * also don't need anything more than a page because we are
		 * guaranteed to have non-overlapping buffers in the tree.
		 */
		int result = mmu_addr_cmp(mnode, addr, PAGE_SIZE);

		if (result < 0)
			node = node->rb_left;
		else if (result > 0)
			node = node->rb_right;
		else
			return mnode;
	}
	return NULL;
}

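/*
 * Unlike the address lookup above, the rcventry lookup below does a
 * linear in-order walk of the whole tree. That stays correct no
 * matter which key the tree is ordered by, at the cost of O(n)
 * lookups instead of O(log n).
 */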
static inline struct mmu_rb_node *mmu_rb_search_by_entry(struct rb_root *root,
							 u32 index)
{
	struct mmu_rb_node *rbnode;
	struct rb_node *node;

	if (root && !RB_EMPTY_ROOT(root)) {
		for (node = rb_first(root); node; node = rb_next(node)) {
			rbnode = rb_entry(node, struct mmu_rb_node, rbnode);
			if (rbnode->rcventry == index)
				return rbnode;
		}
	}
	return NULL;
}

static int mmu_rb_insert_by_entry(struct rb_root *root,
				  struct mmu_rb_node *node)
{
	struct rb_node **new = &root->rb_node, *parent = NULL;

	while (*new) {
		struct mmu_rb_node *this =
			container_of(*new, struct mmu_rb_node, rbnode);
		int result = mmu_entry_cmp(this, node->rcventry);

		parent = *new;
		if (result < 0)
			new = &((*new)->rb_left);
		else if (result > 0)
			new = &((*new)->rb_right);
		else
			return 1;
	}

	rb_link_node(&node->rbnode, parent, new);
	rb_insert_color(&node->rbnode, root);
	return 0;
}

static int mmu_rb_insert_by_addr(struct rb_root *root, struct mmu_rb_node *node)
{
	struct rb_node **new = &root->rb_node, *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct mmu_rb_node *this =
			container_of(*new, struct mmu_rb_node, rbnode);
		int result = mmu_addr_cmp(this, node->virt, node->len);

		parent = *new;
		if (result < 0)
			new = &((*new)->rb_left);
		else if (result > 0)
			new = &((*new)->rb_right);
		else
			return 1;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&node->rbnode, parent, new);
	rb_insert_color(&node->rbnode, root);

	return 0;
}
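
/*
 * Minimal usage sketch (illustrative only; nothing in this file calls
 * it): insert a node by its virtual address, then find it again. The
 * insert helpers return non-zero when an equal-keyed node is already
 * present.
 */
static void __maybe_unused mmu_rb_usage_sketch(struct rb_root *root,
					       struct mmu_rb_node *node)
{
	if (mmu_rb_insert_by_addr(root, node))
		return;	/* an overlapping buffer is already cached */

	WARN_ON(mmu_rb_search_by_addr(root, node->virt) != node);
}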