/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>

#include "rds.h"
#include "iw.h"


/*
 * This is stored as mr->r_trans_private.
 */
struct rds_iw_mr {
	struct rds_iw_device	*device;
	struct rds_iw_mr_pool	*pool;
	struct rdma_cm_id	*cm_id;

	struct ib_mr		*mr;

	struct rds_iw_mapping	mapping;
	unsigned char		remap_count;
};

/*
 * Our own little MR pool
 */
struct rds_iw_mr_pool {
	struct rds_iw_device	*device;		/* back ptr to the device that owns us */

	struct mutex		flush_lock;		/* serialize fmr invalidate */
	struct work_struct	flush_worker;		/* flush worker */

	spinlock_t		list_lock;		/* protect variables below */
	atomic_t		item_count;		/* total # of MRs */
	atomic_t		dirty_count;		/* # of dirty MRs */
	struct list_head	dirty_list;		/* dirty mappings */
	struct list_head	clean_list;		/* unused & unmapped MRs */
	atomic_t		free_pinned;		/* memory pinned by free MRs */
	unsigned long		max_message_size;	/* in pages */
	unsigned long		max_items;
	unsigned long		max_items_soft;
	unsigned long		max_free_pinned;
	int			max_pages;
};

static void rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all);
static void rds_iw_mr_pool_flush_worker(struct work_struct *work);
static int rds_iw_init_reg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
static int rds_iw_map_reg(struct rds_iw_mr_pool *pool,
			  struct rds_iw_mr *ibmr,
			  struct scatterlist *sg, unsigned int nents);
static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
					      struct list_head *unmap_list,
					      struct list_head *kill_list,
					      int *unpinned);
static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);

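/*
 * Look up the rds_iw_device and rdma_cm_id that match a source/destination
 * address pair by walking rds_iw_devices and each device's cm_id_list.
 * Returns 0 and fills in *rds_iwdev and *cm_id on success, 1 if no match.
 */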
static int rds_iw_get_device(struct sockaddr_in *src, struct sockaddr_in *dst,
			     struct rds_iw_device **rds_iwdev,
			     struct rdma_cm_id **cm_id)
{
	struct rds_iw_device *iwdev;
	struct rds_iw_cm_id *i_cm_id;

	*rds_iwdev = NULL;
	*cm_id = NULL;

	list_for_each_entry(iwdev, &rds_iw_devices, list) {
		spin_lock_irq(&iwdev->spinlock);
		list_for_each_entry(i_cm_id, &iwdev->cm_id_list, list) {
			struct sockaddr_in *src_addr, *dst_addr;

			src_addr = (struct sockaddr_in *)&i_cm_id->cm_id->route.addr.src_addr;
			dst_addr = (struct sockaddr_in *)&i_cm_id->cm_id->route.addr.dst_addr;

			rdsdebug("local ipaddr = %x port %d, "
				 "remote ipaddr = %x port %d"
				 "..looking for %x port %d, "
				 "remote ipaddr = %x port %d\n",
				src_addr->sin_addr.s_addr,
				src_addr->sin_port,
				dst_addr->sin_addr.s_addr,
				dst_addr->sin_port,
				src->sin_addr.s_addr,
				src->sin_port,
				dst->sin_addr.s_addr,
				dst->sin_port);
#ifdef WORKING_TUPLE_DETECTION
			if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr &&
			    src_addr->sin_port == src->sin_port &&
			    dst_addr->sin_addr.s_addr == dst->sin_addr.s_addr &&
			    dst_addr->sin_port == dst->sin_port) {
#else
			/* FIXME - needs to compare the local and remote
			 * ipaddr/port tuple, but the ipaddr is the only
			 * available information in the rds_sock (as the rest are
			 * zero'ed). It doesn't appear to be properly populated
			 * during connection setup...
			 */
			if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr) {
#endif
				spin_unlock_irq(&iwdev->spinlock);
				*rds_iwdev = iwdev;
				*cm_id = i_cm_id->cm_id;
				return 0;
			}
		}
		spin_unlock_irq(&iwdev->spinlock);
	}

	return 1;
}

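/* Remember a cm_id on the device's cm_id_list so later lookups can find it. */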
static int rds_iw_add_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id)
{
	struct rds_iw_cm_id *i_cm_id;

	i_cm_id = kmalloc(sizeof *i_cm_id, GFP_KERNEL);
	if (!i_cm_id)
		return -ENOMEM;

	i_cm_id->cm_id = cm_id;

	spin_lock_irq(&rds_iwdev->spinlock);
	list_add_tail(&i_cm_id->list, &rds_iwdev->cm_id_list);
	spin_unlock_irq(&rds_iwdev->spinlock);

	return 0;
}

static void rds_iw_remove_cm_id(struct rds_iw_device *rds_iwdev,
				struct rdma_cm_id *cm_id)
{
	struct rds_iw_cm_id *i_cm_id;

	spin_lock_irq(&rds_iwdev->spinlock);
	list_for_each_entry(i_cm_id, &rds_iwdev->cm_id_list, list) {
		if (i_cm_id->cm_id == cm_id) {
			list_del(&i_cm_id->list);
			kfree(i_cm_id);
			break;
		}
	}
	spin_unlock_irq(&rds_iwdev->spinlock);
}


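/*
 * Re-associate a cm_id with a device: drop any stale entry whose address
 * tuple no longer resolves, then add the cm_id to this device's list.
 */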
int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id)
{
	struct sockaddr_in *src_addr, *dst_addr;
	struct rds_iw_device *rds_iwdev_old;
	struct rdma_cm_id *pcm_id;
	int rc;

	src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
	dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;

	rc = rds_iw_get_device(src_addr, dst_addr, &rds_iwdev_old, &pcm_id);
	if (rc)
		rds_iw_remove_cm_id(rds_iwdev, cm_id);

	return rds_iw_add_cm_id(rds_iwdev, cm_id);
}

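/* Move a connection from the global nodev list onto this device's conn_list. */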
void rds_iw_add_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *conn)
{
	struct rds_iw_connection *ic = conn->c_transport_data;

	/* conn was previously on the nodev_conns_list */
	spin_lock_irq(&iw_nodev_conns_lock);
	BUG_ON(list_empty(&iw_nodev_conns));
	BUG_ON(list_empty(&ic->iw_node));
	list_del(&ic->iw_node);

	spin_lock(&rds_iwdev->spinlock);
	list_add_tail(&ic->iw_node, &rds_iwdev->conn_list);
	spin_unlock(&rds_iwdev->spinlock);
	spin_unlock_irq(&iw_nodev_conns_lock);

	ic->rds_iwdev = rds_iwdev;
}

void rds_iw_remove_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *conn)
{
	struct rds_iw_connection *ic = conn->c_transport_data;

	/* place conn on nodev_conns_list */
	spin_lock(&iw_nodev_conns_lock);

	spin_lock_irq(&rds_iwdev->spinlock);
	BUG_ON(list_empty(&ic->iw_node));
	list_del(&ic->iw_node);
	spin_unlock_irq(&rds_iwdev->spinlock);

	list_add_tail(&ic->iw_node, &iw_nodev_conns);

	spin_unlock(&iw_nodev_conns_lock);

	rds_iw_remove_cm_id(ic->rds_iwdev, ic->i_cm_id);
	ic->rds_iwdev = NULL;
}

void __rds_iw_destroy_conns(struct list_head *list, spinlock_t *list_lock)
{
	struct rds_iw_connection *ic, *_ic;
	LIST_HEAD(tmp_list);

	/* avoid calling conn_destroy with irqs off */
	spin_lock_irq(list_lock);
	list_splice(list, &tmp_list);
	INIT_LIST_HEAD(list);
	spin_unlock_irq(list_lock);

	list_for_each_entry_safe(ic, _ic, &tmp_list, iw_node)
		rds_conn_destroy(ic->conn);
}

static void rds_iw_set_scatterlist(struct rds_iw_scatterlist *sg,
		struct scatterlist *list, unsigned int sg_len)
{
	sg->list = list;
	sg->len = sg_len;
	sg->dma_len = 0;
	sg->dma_npages = 0;
	sg->bytes = 0;
}

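/*
 * DMA-map a scatterlist and check that it is usable as one registration:
 * only the first entry may start mid-page and only the last may end
 * mid-page. On success, sg->dma_npages holds the number of pages covered.
 */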
static int rds_iw_map_scatterlist(struct rds_iw_device *rds_iwdev,
				  struct rds_iw_scatterlist *sg)
{
	struct ib_device *dev = rds_iwdev->dev;
	int i, ret;

	WARN_ON(sg->dma_len);

	sg->dma_len = ib_dma_map_sg(dev, sg->list, sg->len, DMA_BIDIRECTIONAL);
	if (unlikely(!sg->dma_len)) {
		printk(KERN_WARNING "RDS/IW: dma_map_sg failed!\n");
		return -EBUSY;
	}

	sg->bytes = 0;
	sg->dma_npages = 0;

	ret = -EINVAL;
	for (i = 0; i < sg->dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &sg->list[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &sg->list[i]);
		u64 end_addr;

		sg->bytes += dma_len;

		end_addr = dma_addr + dma_len;
		if (dma_addr & PAGE_MASK) {
			if (i > 0)
				goto out_unmap;
			dma_addr &= ~PAGE_MASK;
		}
		if (end_addr & PAGE_MASK) {
			if (i < sg->dma_len - 1)
				goto out_unmap;
			end_addr = (end_addr + PAGE_MASK) & ~PAGE_MASK;
		}

		sg->dma_npages += (end_addr - dma_addr) >> PAGE_SHIFT;
	}

	/* Now gather the dma addrs into one list */
	if (sg->dma_npages > fastreg_message_size)
		goto out_unmap;

	return 0;

out_unmap:
	ib_dma_unmap_sg(rds_iwdev->dev, sg->list, sg->len, DMA_BIDIRECTIONAL);
	sg->dma_len = 0;
	return ret;
}

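/* Allocate and initialize the per-device MR pool and its flush worker. */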
struct rds_iw_mr_pool *rds_iw_create_mr_pool(struct rds_iw_device *rds_iwdev)
{
	struct rds_iw_mr_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		printk(KERN_WARNING "RDS/IW: rds_iw_create_mr_pool alloc error\n");
		return ERR_PTR(-ENOMEM);
	}

	pool->device = rds_iwdev;
	INIT_LIST_HEAD(&pool->dirty_list);
	INIT_LIST_HEAD(&pool->clean_list);
	mutex_init(&pool->flush_lock);
	spin_lock_init(&pool->list_lock);
	INIT_WORK(&pool->flush_worker, rds_iw_mr_pool_flush_worker);

	pool->max_message_size = fastreg_message_size;
	pool->max_items = fastreg_pool_size;
	pool->max_free_pinned = pool->max_items * pool->max_message_size / 4;
	pool->max_pages = fastreg_message_size;

	/* We never allow more than max_items MRs to be allocated.
	 * When we exceed max_items_soft, we start freeing
	 * items more aggressively.
	 * Make sure that max_items > max_items_soft > max_items / 2
	 */
	pool->max_items_soft = pool->max_items * 3 / 4;

	return pool;
}

void rds_iw_get_mr_info(struct rds_iw_device *rds_iwdev, struct rds_info_rdma_connection *iinfo)
{
	struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool;

	iinfo->rdma_mr_max = pool->max_items;
	iinfo->rdma_mr_size = pool->max_pages;
}

void rds_iw_destroy_mr_pool(struct rds_iw_mr_pool *pool)
{
	flush_workqueue(rds_wq);
	rds_iw_flush_mr_pool(pool, 1);
	BUG_ON(atomic_read(&pool->item_count));
	BUG_ON(atomic_read(&pool->free_pinned));
	kfree(pool);
}

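/* Grab an unused MR from the pool's clean list, if one is available. */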
static inline struct rds_iw_mr *rds_iw_reuse_fmr(struct rds_iw_mr_pool *pool)
{
	struct rds_iw_mr *ibmr = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->list_lock, flags);
	if (!list_empty(&pool->clean_list)) {
		ibmr = list_entry(pool->clean_list.next, struct rds_iw_mr, mapping.m_list);
		list_del_init(&ibmr->mapping.m_list);
	}
	spin_unlock_irqrestore(&pool->list_lock, flags);

	return ibmr;
}

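/*
 * Get an MR for use by a socket: reuse a clean one if possible, otherwise
 * allocate a new one up to the pool limit, flushing dirty MRs when the
 * pool is full. Returns ERR_PTR(-EAGAIN) if the pool stays depleted.
 */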
static struct rds_iw_mr *rds_iw_alloc_mr(struct rds_iw_device *rds_iwdev)
{
	struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool;
	struct rds_iw_mr *ibmr = NULL;
	int err = 0, iter = 0;

	while (1) {
		ibmr = rds_iw_reuse_fmr(pool);
		if (ibmr)
			return ibmr;

		/* No clean MRs - now we have the choice of either
		 * allocating a fresh MR up to the limit imposed by the
		 * driver, or flushing any dirty unused MRs.
		 * We try to avoid stalling in the send path if possible,
		 * so we allocate as long as we're allowed to.
		 *
		 * We're fussy with enforcing the FMR limit, though. If the driver
		 * tells us we can't use more than N fmrs, we shouldn't start
		 * arguing with it */
		if (atomic_inc_return(&pool->item_count) <= pool->max_items)
			break;

		atomic_dec(&pool->item_count);

		if (++iter > 2) {
			rds_iw_stats_inc(s_iw_rdma_mr_pool_depleted);
			return ERR_PTR(-EAGAIN);
		}

		/* We do have some empty MRs. Flush them out. */
		rds_iw_stats_inc(s_iw_rdma_mr_pool_wait);
		rds_iw_flush_mr_pool(pool, 0);
	}

	ibmr = kzalloc(sizeof(*ibmr), GFP_KERNEL);
	if (!ibmr) {
		err = -ENOMEM;
		goto out_no_cigar;
	}

	spin_lock_init(&ibmr->mapping.m_lock);
	INIT_LIST_HEAD(&ibmr->mapping.m_list);
	ibmr->mapping.m_mr = ibmr;

	err = rds_iw_init_reg(pool, ibmr);
	if (err)
		goto out_no_cigar;

	rds_iw_stats_inc(s_iw_rdma_mr_alloc);
	return ibmr;

out_no_cigar:
	if (ibmr) {
		rds_iw_destroy_fastreg(pool, ibmr);
		kfree(ibmr);
	}
	atomic_dec(&pool->item_count);
	return ERR_PTR(err);
}

void rds_iw_sync_mr(void *trans_private, int direction)
{
	struct rds_iw_mr *ibmr = trans_private;
	struct rds_iw_device *rds_iwdev = ibmr->device;

	switch (direction) {
	case DMA_FROM_DEVICE:
		ib_dma_sync_sg_for_cpu(rds_iwdev->dev, ibmr->mapping.m_sg.list,
			ibmr->mapping.m_sg.dma_len, DMA_BIDIRECTIONAL);
		break;
	case DMA_TO_DEVICE:
		ib_dma_sync_sg_for_device(rds_iwdev->dev, ibmr->mapping.m_sg.list,
			ibmr->mapping.m_sg.dma_len, DMA_BIDIRECTIONAL);
		break;
	}
}

/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
static void rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
{
	struct rds_iw_mr *ibmr, *next;
	LIST_HEAD(unmap_list);
	LIST_HEAD(kill_list);
	unsigned long flags;
	unsigned int nfreed = 0, ncleaned = 0, unpinned = 0;

	rds_iw_stats_inc(s_iw_rdma_mr_pool_flush);

	mutex_lock(&pool->flush_lock);

	spin_lock_irqsave(&pool->list_lock, flags);
	/* Get the list of all mappings to be destroyed */
	list_splice_init(&pool->dirty_list, &unmap_list);
	if (free_all)
		list_splice_init(&pool->clean_list, &kill_list);
	spin_unlock_irqrestore(&pool->list_lock, flags);

	/* Batched invalidate of dirty MRs.
	 * For FMR based MRs, the mappings on the unmap list are
	 * actually members of an ibmr (ibmr->mapping). They either
	 * migrate to the kill_list, or have been cleaned and should be
	 * moved to the clean_list.
	 * For fastregs, they will be dynamically allocated, and
	 * will be destroyed by the unmap function.
	 */
	if (!list_empty(&unmap_list)) {
		ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list,
						     &kill_list, &unpinned);
		/* If we've been asked to destroy all MRs, move those
		 * that were simply cleaned to the kill list */
		if (free_all)
			list_splice_init(&unmap_list, &kill_list);
	}

	/* Destroy any MRs that are past their best before date */
	list_for_each_entry_safe(ibmr, next, &kill_list, mapping.m_list) {
		rds_iw_stats_inc(s_iw_rdma_mr_free);
		list_del(&ibmr->mapping.m_list);
		rds_iw_destroy_fastreg(pool, ibmr);
		kfree(ibmr);
		nfreed++;
	}

	/* Anything that remains is a laundered ibmr, which we can add
	 * back to the clean list. */
	if (!list_empty(&unmap_list)) {
		spin_lock_irqsave(&pool->list_lock, flags);
		list_splice(&unmap_list, &pool->clean_list);
		spin_unlock_irqrestore(&pool->list_lock, flags);
	}

	atomic_sub(unpinned, &pool->free_pinned);
	atomic_sub(ncleaned, &pool->dirty_count);
	atomic_sub(nfreed, &pool->item_count);

	mutex_unlock(&pool->flush_lock);
}

static void rds_iw_mr_pool_flush_worker(struct work_struct *work)
{
	struct rds_iw_mr_pool *pool = container_of(work, struct rds_iw_mr_pool, flush_worker);

	rds_iw_flush_mr_pool(pool, 0);
}

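/*
 * Release an MR back to the pool's dirty list; trigger a flush (or run one
 * synchronously if the caller asked for invalidation) when too many pages
 * are pinned or too many MRs are dirty.
 */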
void rds_iw_free_mr(void *trans_private, int invalidate)
{
	struct rds_iw_mr *ibmr = trans_private;
	struct rds_iw_mr_pool *pool = ibmr->device->mr_pool;

	rdsdebug("RDS/IW: free_mr nents %u\n", ibmr->mapping.m_sg.len);
	if (!pool)
		return;

	/* Return it to the pool's free list */
	rds_iw_free_fastreg(pool, ibmr);

	/* If we've pinned too many pages, request a flush */
	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
	    atomic_read(&pool->dirty_count) >= pool->max_items / 10)
		queue_work(rds_wq, &pool->flush_worker);

	if (invalidate) {
		if (likely(!in_interrupt())) {
			rds_iw_flush_mr_pool(pool, 0);
		} else {
			/* We get here if the user created a MR marked
			 * as use_once and invalidate at the same time. */
			queue_work(rds_wq, &pool->flush_worker);
		}
	}
}

void rds_iw_flush_mrs(void)
{
	struct rds_iw_device *rds_iwdev;

	list_for_each_entry(rds_iwdev, &rds_iw_devices, list) {
		struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool;

		if (pool)
			rds_iw_flush_mr_pool(pool, 0);
	}
}

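/*
 * GET_MR entry point: find the device and cm_id for the socket's address
 * pair, allocate an MR from that device's pool and map the caller's
 * scatterlist, returning the rkey through *key_ret.
 */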
void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret)
{
	struct rds_iw_device *rds_iwdev;
	struct rds_iw_mr *ibmr = NULL;
	struct rdma_cm_id *cm_id;
	struct sockaddr_in src = {
		.sin_addr.s_addr = rs->rs_bound_addr,
		.sin_port = rs->rs_bound_port,
	};
	struct sockaddr_in dst = {
		.sin_addr.s_addr = rs->rs_conn_addr,
		.sin_port = rs->rs_conn_port,
	};
	int ret;

	ret = rds_iw_get_device(&src, &dst, &rds_iwdev, &cm_id);
	if (ret || !cm_id) {
		ret = -ENODEV;
		goto out;
	}

	if (!rds_iwdev->mr_pool) {
		ret = -ENODEV;
		goto out;
	}

	ibmr = rds_iw_alloc_mr(rds_iwdev);
	if (IS_ERR(ibmr))
		return ibmr;

	ibmr->cm_id = cm_id;
	ibmr->device = rds_iwdev;

	ret = rds_iw_map_reg(rds_iwdev->mr_pool, ibmr, sg, nents);
	if (ret == 0)
		*key_ret = ibmr->mr->rkey;
	else
		printk(KERN_WARNING "RDS/IW: failed to map mr (errno=%d)\n", ret);

out:
	if (ret) {
		if (ibmr)
			rds_iw_free_mr(ibmr, 0);
		ibmr = ERR_PTR(ret);
	}
	return ibmr;
}

/*
 * iWARP reg handling
 *
 * The life cycle of a fastreg registration is a bit different from
 * FMRs.
 * The idea behind fastreg is to have one MR, to which we bind different
 * mappings over time. To avoid stalling on the expensive map and invalidate
 * operations, these operations are pipelined on the same send queue on
 * which we want to send the message containing the r_key.
 *
 * This creates a bit of a problem for us, as we do not have the destination
 * IP in GET_MR, so the connection must be set up prior to the GET_MR call
 * for RDMA to be correctly set up. If a fastreg request is present,
 * rds_iw_xmit will try to queue a LOCAL_INV (if needed) and a REG_MR work
 * request before queuing the SEND. When completions for these arrive, they
 * are dispatched to the MR, which has a bit set showing that RDMA can be
 * performed.
 *
 * There is another interesting aspect that's related to invalidation.
 * The application can request that a mapping is invalidated in FREE_MR.
 * The expectation there is that this invalidation step includes ALL
 * PREVIOUSLY FREED MRs.
 */
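/* Allocate the fastreg MR backing this ibmr; the page list is bound later by rds_iw_map_reg(). */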
static int rds_iw_init_reg(struct rds_iw_mr_pool *pool,
			   struct rds_iw_mr *ibmr)
{
	struct rds_iw_device *rds_iwdev = pool->device;
	struct ib_mr *mr;
	int err;

	mr = ib_alloc_mr(rds_iwdev->pd, IB_MR_TYPE_MEM_REG,
			 pool->max_message_size);
	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);

		printk(KERN_WARNING "RDS/IW: ib_alloc_mr failed (err=%d)\n", err);
		return err;
	}

	ibmr->mr = mr;
	return 0;
}

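/*
 * Bind the current mapping to the MR by posting an IB_WR_REG_MR work
 * request on the connection's send queue.
 */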
static int rds_iw_rdma_reg_mr(struct rds_iw_mapping *mapping)
{
	struct rds_iw_mr *ibmr = mapping->m_mr;
	struct rds_iw_scatterlist *m_sg = &mapping->m_sg;
	struct ib_reg_wr reg_wr;
	struct ib_send_wr *failed_wr;
	int ret, n;

	n = ib_map_mr_sg_zbva(ibmr->mr, m_sg->list, m_sg->len, PAGE_SIZE);
	if (unlikely(n != m_sg->len))
		return n < 0 ? n : -EINVAL;

	reg_wr.wr.next = NULL;
	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.wr.wr_id = RDS_IW_REG_WR_ID;
	reg_wr.wr.num_sge = 0;
	reg_wr.mr = ibmr->mr;
	reg_wr.key = mapping->m_rkey;
	reg_wr.access = IB_ACCESS_LOCAL_WRITE |
			IB_ACCESS_REMOTE_READ |
			IB_ACCESS_REMOTE_WRITE;

	/*
	 * Perform a WR for the reg_mr. Each individual page
	 * in the sg list is added to the fast reg page list and placed
	 * inside the reg_mr WR. The key used is a rolling 8bit
	 * counter, which should guarantee uniqueness.
	 */
	ib_update_fast_reg_key(ibmr->mr, ibmr->remap_count++);
	mapping->m_rkey = ibmr->mr->rkey;

	failed_wr = &reg_wr.wr;
	ret = ib_post_send(ibmr->cm_id->qp, &reg_wr.wr, &failed_wr);
	BUG_ON(failed_wr != &reg_wr.wr);
	if (ret)
		printk_ratelimited(KERN_WARNING "RDS/IW: %s:%d ib_post_send returned %d\n",
			__func__, __LINE__, ret);
	return ret;
}

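/* Post an IB_WR_LOCAL_INV work request to invalidate the MR's current rkey. */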
static int rds_iw_rdma_fastreg_inv(struct rds_iw_mr *ibmr)
{
	struct ib_send_wr s_wr, *failed_wr;
	int ret = 0;

	if (!ibmr->cm_id->qp || !ibmr->mr)
		goto out;

	memset(&s_wr, 0, sizeof(s_wr));
	s_wr.wr_id = RDS_IW_LOCAL_INV_WR_ID;
	s_wr.opcode = IB_WR_LOCAL_INV;
	s_wr.ex.invalidate_rkey = ibmr->mr->rkey;
	s_wr.send_flags = IB_SEND_SIGNALED;

	failed_wr = &s_wr;
	ret = ib_post_send(ibmr->cm_id->qp, &s_wr, &failed_wr);
	if (ret) {
		printk_ratelimited(KERN_WARNING "RDS/IW: %s:%d ib_post_send returned %d\n",
			__func__, __LINE__, ret);
		goto out;
	}
out:
	return ret;
}

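/* DMA-map the caller's scatterlist and register it with the ibmr's MR. */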
static int rds_iw_map_reg(struct rds_iw_mr_pool *pool,
			  struct rds_iw_mr *ibmr,
			  struct scatterlist *sg,
			  unsigned int sg_len)
{
	struct rds_iw_device *rds_iwdev = pool->device;
	struct rds_iw_mapping *mapping = &ibmr->mapping;
	u64 *dma_pages = NULL;
	int ret = 0;

	rds_iw_set_scatterlist(&mapping->m_sg, sg, sg_len);

	ret = rds_iw_map_scatterlist(rds_iwdev, &mapping->m_sg);
	if (ret) {
		dma_pages = NULL;
		goto out;
	}

	if (mapping->m_sg.dma_len > pool->max_message_size) {
		ret = -EMSGSIZE;
		goto out;
	}

	ret = rds_iw_rdma_reg_mr(mapping);
	if (ret)
		goto out;

	rds_iw_stats_inc(s_iw_rdma_mr_used);

out:
	kfree(dma_pages);

	return ret;
}

/*
 * "Free" a fastreg MR.
 */
static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool,
		struct rds_iw_mr *ibmr)
{
	unsigned long flags;
	int ret;

	if (!ibmr->mapping.m_sg.dma_len)
		return;

	ret = rds_iw_rdma_fastreg_inv(ibmr);
	if (ret)
		return;

	/* Try to post the LOCAL_INV WR to the queue. */
	spin_lock_irqsave(&pool->list_lock, flags);

	list_add_tail(&ibmr->mapping.m_list, &pool->dirty_list);
	atomic_add(ibmr->mapping.m_sg.len, &pool->free_pinned);
	atomic_inc(&pool->dirty_count);

	spin_unlock_irqrestore(&pool->list_lock, flags);
}

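/*
 * Walk the dirty mappings, count the pages they pin, and return the number
 * of mappings cleaned. Cleaned mappings stay on unmap_list so the caller
 * can move them to the clean list (or to kill_list when destroying).
 */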
static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
				struct list_head *unmap_list,
				struct list_head *kill_list,
				int *unpinned)
{
	struct rds_iw_mapping *mapping, *next;
	unsigned int ncleaned = 0;
	LIST_HEAD(laundered);

	/* Batched invalidation of fastreg MRs.
	 * Why do we do it this way, even though we could pipeline unmap
	 * and remap? The reason is the application semantics - when the
	 * application requests an invalidation of MRs, it expects all
	 * previously released R_Keys to become invalid.
	 *
	 * If we implement MR reuse naively, we risk memory corruption
	 * (this has actually been observed). So the default behavior
	 * requires that a MR goes through an explicit unmap operation before
	 * we can reuse it again.
	 *
	 * We could probably improve on this a little, by allowing immediate
	 * reuse of a MR on the same socket (eg you could add a small
	 * cache of unused MRs to struct rds_sock - GET_MR could grab one
	 * of these without requiring an explicit invalidate).
	 */
	while (!list_empty(unmap_list)) {
		unsigned long flags;

		spin_lock_irqsave(&pool->list_lock, flags);
		list_for_each_entry_safe(mapping, next, unmap_list, m_list) {
			*unpinned += mapping->m_sg.len;
			list_move(&mapping->m_list, &laundered);
			ncleaned++;
		}
		spin_unlock_irqrestore(&pool->list_lock, flags);
	}

	/* Move all laundered mappings back to the unmap list.
	 * We do not kill any WRs right now - it doesn't seem the
	 * fastreg API has a max_remap limit. */
	list_splice_init(&laundered, unmap_list);

	return ncleaned;
}

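/* Deregister the underlying ib_mr when an ibmr is finally destroyed. */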
static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool,
		struct rds_iw_mr *ibmr)
{
	if (ibmr->mr)
		ib_dereg_mr(ibmr->mr);
}