/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>

#include "rds.h"
#include "iw.h"


/*
 * This is stored as mr->r_trans_private.
 */
struct rds_iw_mr {
	struct rds_iw_device	*device;
	struct rds_iw_mr_pool	*pool;
	struct rdma_cm_id	*cm_id;

	struct ib_mr		*mr;
	struct ib_fast_reg_page_list *page_list;

	struct rds_iw_mapping	mapping;
	unsigned char		remap_count;
};

/*
 * Our own little MR pool
 */
struct rds_iw_mr_pool {
	struct rds_iw_device	*device;	/* back ptr to the device that owns us */

	struct mutex		flush_lock;	/* serialize fmr invalidate */
	struct work_struct	flush_worker;	/* flush worker */

	spinlock_t		list_lock;	/* protect variables below */
	atomic_t		item_count;	/* total # of MRs */
	atomic_t		dirty_count;	/* # of dirty MRs */
	struct list_head	dirty_list;	/* dirty mappings */
	struct list_head	clean_list;	/* unused & unmapped MRs */
	atomic_t		free_pinned;	/* memory pinned by free MRs */
	unsigned long		max_message_size; /* in pages */
	unsigned long		max_items;
	unsigned long		max_items_soft;
	unsigned long		max_free_pinned;
	int			max_pages;
};

static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all);
static void rds_iw_mr_pool_flush_worker(struct work_struct *work);
static int rds_iw_init_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool,
			      struct rds_iw_mr *ibmr,
			      struct scatterlist *sg, unsigned int nents);
static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
					      struct list_head *unmap_list,
					      struct list_head *kill_list,
					      int *unpinned);
static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);

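/*
 * Walk the global device list looking for the rds_iw_device and cm_id whose
 * bound address matches the given source (and, when WORKING_TUPLE_DETECTION
 * is enabled, destination) tuple.  Fills in *rds_iwdev and *cm_id and
 * returns 0 on a match, or 1 if nothing matched.
 */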
static int rds_iw_get_device(struct sockaddr_in *src, struct sockaddr_in *dst,
			     struct rds_iw_device **rds_iwdev,
			     struct rdma_cm_id **cm_id)
{
	struct rds_iw_device *iwdev;
	struct rds_iw_cm_id *i_cm_id;

	*rds_iwdev = NULL;
	*cm_id = NULL;

	list_for_each_entry(iwdev, &rds_iw_devices, list) {
		spin_lock_irq(&iwdev->spinlock);
		list_for_each_entry(i_cm_id, &iwdev->cm_id_list, list) {
			struct sockaddr_in *src_addr, *dst_addr;

			src_addr = (struct sockaddr_in *)&i_cm_id->cm_id->route.addr.src_addr;
			dst_addr = (struct sockaddr_in *)&i_cm_id->cm_id->route.addr.dst_addr;

			rdsdebug("local ipaddr = %x port %d, "
				 "remote ipaddr = %x port %d"
				 "..looking for %x port %d, "
				 "remote ipaddr = %x port %d\n",
				 src_addr->sin_addr.s_addr,
				 src_addr->sin_port,
				 dst_addr->sin_addr.s_addr,
				 dst_addr->sin_port,
				 src->sin_addr.s_addr,
				 src->sin_port,
				 dst->sin_addr.s_addr,
				 dst->sin_port);
#ifdef WORKING_TUPLE_DETECTION
			if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr &&
			    src_addr->sin_port == src->sin_port &&
			    dst_addr->sin_addr.s_addr == dst->sin_addr.s_addr &&
			    dst_addr->sin_port == dst->sin_port) {
#else
			/* FIXME - needs to compare the local and remote
			 * ipaddr/port tuple, but the ipaddr is the only
			 * available information in the rds_sock (the rest are
			 * zeroed).  It doesn't appear to be properly populated
			 * during connection setup...
			 */
			if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr) {
#endif
				spin_unlock_irq(&iwdev->spinlock);
				*rds_iwdev = iwdev;
				*cm_id = i_cm_id->cm_id;
				return 0;
			}
		}
		spin_unlock_irq(&iwdev->spinlock);
	}

	return 1;
}

static int rds_iw_add_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id)
{
	struct rds_iw_cm_id *i_cm_id;

	i_cm_id = kmalloc(sizeof *i_cm_id, GFP_KERNEL);
	if (!i_cm_id)
		return -ENOMEM;

	i_cm_id->cm_id = cm_id;

	spin_lock_irq(&rds_iwdev->spinlock);
	list_add_tail(&i_cm_id->list, &rds_iwdev->cm_id_list);
	spin_unlock_irq(&rds_iwdev->spinlock);

	return 0;
}

static void rds_iw_remove_cm_id(struct rds_iw_device *rds_iwdev,
				struct rdma_cm_id *cm_id)
{
	struct rds_iw_cm_id *i_cm_id;

	spin_lock_irq(&rds_iwdev->spinlock);
	list_for_each_entry(i_cm_id, &rds_iwdev->cm_id_list, list) {
		if (i_cm_id->cm_id == cm_id) {
			list_del(&i_cm_id->list);
			kfree(i_cm_id);
			break;
		}
	}
	spin_unlock_irq(&rds_iwdev->spinlock);
}


int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id)
{
	struct sockaddr_in *src_addr, *dst_addr;
	struct rds_iw_device *rds_iwdev_old;
	struct rdma_cm_id *pcm_id;
	int rc;

	src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
	dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;

	rc = rds_iw_get_device(src_addr, dst_addr, &rds_iwdev_old, &pcm_id);
	if (rc)
		rds_iw_remove_cm_id(rds_iwdev, cm_id);

	return rds_iw_add_cm_id(rds_iwdev, cm_id);
}

void rds_iw_add_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *conn)
{
	struct rds_iw_connection *ic = conn->c_transport_data;

	/* conn was previously on the nodev_conns_list */
	spin_lock_irq(&iw_nodev_conns_lock);
	BUG_ON(list_empty(&iw_nodev_conns));
	BUG_ON(list_empty(&ic->iw_node));
	list_del(&ic->iw_node);

	spin_lock(&rds_iwdev->spinlock);
	list_add_tail(&ic->iw_node, &rds_iwdev->conn_list);
	spin_unlock(&rds_iwdev->spinlock);
	spin_unlock_irq(&iw_nodev_conns_lock);

	ic->rds_iwdev = rds_iwdev;
}

void rds_iw_remove_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *conn)
{
	struct rds_iw_connection *ic = conn->c_transport_data;

	/* place conn on nodev_conns_list */
	spin_lock(&iw_nodev_conns_lock);

	spin_lock_irq(&rds_iwdev->spinlock);
	BUG_ON(list_empty(&ic->iw_node));
	list_del(&ic->iw_node);
	spin_unlock_irq(&rds_iwdev->spinlock);

	list_add_tail(&ic->iw_node, &iw_nodev_conns);

	spin_unlock(&iw_nodev_conns_lock);

	rds_iw_remove_cm_id(ic->rds_iwdev, ic->i_cm_id);
	ic->rds_iwdev = NULL;
}

void __rds_iw_destroy_conns(struct list_head *list, spinlock_t *list_lock)
{
	struct rds_iw_connection *ic, *_ic;
	LIST_HEAD(tmp_list);

	/* avoid calling conn_destroy with irqs off */
	spin_lock_irq(list_lock);
	list_splice(list, &tmp_list);
	INIT_LIST_HEAD(list);
	spin_unlock_irq(list_lock);

	list_for_each_entry_safe(ic, _ic, &tmp_list, iw_node)
		rds_conn_destroy(ic->conn);
}

static void rds_iw_set_scatterlist(struct rds_iw_scatterlist *sg,
		struct scatterlist *list, unsigned int sg_len)
{
	sg->list = list;
	sg->len = sg_len;
	sg->dma_len = 0;
	sg->dma_npages = 0;
	sg->bytes = 0;
}

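/*
 * DMA-map a scatterlist and gather the resulting DMA addresses into a
 * freshly allocated u64 array, one entry per page, suitable for a fastreg
 * page list.  Scatterlist entries that are not page aligned (other than
 * the start of the first entry and the end of the last) cause the mapping
 * to be torn down and an error to be returned.
 */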
static u64 *rds_iw_map_scatterlist(struct rds_iw_device *rds_iwdev,
				   struct rds_iw_scatterlist *sg)
{
	struct ib_device *dev = rds_iwdev->dev;
	u64 *dma_pages = NULL;
	int i, j, ret;

	WARN_ON(sg->dma_len);

	sg->dma_len = ib_dma_map_sg(dev, sg->list, sg->len, DMA_BIDIRECTIONAL);
	if (unlikely(!sg->dma_len)) {
		printk(KERN_WARNING "RDS/IW: dma_map_sg failed!\n");
		return ERR_PTR(-EBUSY);
	}

	sg->bytes = 0;
	sg->dma_npages = 0;

	ret = -EINVAL;
	for (i = 0; i < sg->dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &sg->list[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &sg->list[i]);
		u64 end_addr;

		sg->bytes += dma_len;

		end_addr = dma_addr + dma_len;
		if (dma_addr & PAGE_MASK) {
			if (i > 0)
				goto out_unmap;
			dma_addr &= ~PAGE_MASK;
		}
		if (end_addr & PAGE_MASK) {
			if (i < sg->dma_len - 1)
				goto out_unmap;
			end_addr = (end_addr + PAGE_MASK) & ~PAGE_MASK;
		}

		sg->dma_npages += (end_addr - dma_addr) >> PAGE_SHIFT;
	}

	/* Now gather the dma addrs into one list */
	if (sg->dma_npages > fastreg_message_size)
		goto out_unmap;

	dma_pages = kmalloc(sizeof(u64) * sg->dma_npages, GFP_ATOMIC);
	if (!dma_pages) {
		ret = -ENOMEM;
		goto out_unmap;
	}

	for (i = j = 0; i < sg->dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &sg->list[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &sg->list[i]);
		u64 end_addr;

		end_addr = dma_addr + dma_len;
		dma_addr &= ~PAGE_MASK;
		for (; dma_addr < end_addr; dma_addr += PAGE_SIZE)
			dma_pages[j++] = dma_addr;
		BUG_ON(j > sg->dma_npages);
	}

	return dma_pages;

out_unmap:
	ib_dma_unmap_sg(rds_iwdev->dev, sg->list, sg->len, DMA_BIDIRECTIONAL);
	sg->dma_len = 0;
	kfree(dma_pages);
	return ERR_PTR(ret);
}


struct rds_iw_mr_pool *rds_iw_create_mr_pool(struct rds_iw_device *rds_iwdev)
{
	struct rds_iw_mr_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		printk(KERN_WARNING "RDS/IW: rds_iw_create_mr_pool alloc error\n");
		return ERR_PTR(-ENOMEM);
	}

	pool->device = rds_iwdev;
	INIT_LIST_HEAD(&pool->dirty_list);
	INIT_LIST_HEAD(&pool->clean_list);
	mutex_init(&pool->flush_lock);
	spin_lock_init(&pool->list_lock);
	INIT_WORK(&pool->flush_worker, rds_iw_mr_pool_flush_worker);

	pool->max_message_size = fastreg_message_size;
	pool->max_items = fastreg_pool_size;
	pool->max_free_pinned = pool->max_items * pool->max_message_size / 4;
	pool->max_pages = fastreg_message_size;

	/* We never allow more than max_items MRs to be allocated.
	 * When we exceed max_items_soft, we start freeing
	 * items more aggressively.
	 * Make sure that max_items > max_items_soft > max_items / 2
	 */
	pool->max_items_soft = pool->max_items * 3 / 4;

	return pool;
}

void rds_iw_get_mr_info(struct rds_iw_device *rds_iwdev, struct rds_info_rdma_connection *iinfo)
{
	struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool;

	iinfo->rdma_mr_max = pool->max_items;
	iinfo->rdma_mr_size = pool->max_pages;
}

void rds_iw_destroy_mr_pool(struct rds_iw_mr_pool *pool)
{
	flush_workqueue(rds_wq);
	rds_iw_flush_mr_pool(pool, 1);
	BUG_ON(atomic_read(&pool->item_count));
	BUG_ON(atomic_read(&pool->free_pinned));
	kfree(pool);
}

static inline struct rds_iw_mr *rds_iw_reuse_fmr(struct rds_iw_mr_pool *pool)
{
	struct rds_iw_mr *ibmr = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->list_lock, flags);
	if (!list_empty(&pool->clean_list)) {
		ibmr = list_entry(pool->clean_list.next, struct rds_iw_mr, mapping.m_list);
		list_del_init(&ibmr->mapping.m_list);
	}
	spin_unlock_irqrestore(&pool->list_lock, flags);

	return ibmr;
}

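/*
 * Get an MR for a new mapping: reuse one from the clean list when possible,
 * otherwise allocate a fresh one, flushing the pool if we have hit the
 * item limit.  Returns an ERR_PTR on failure.
 */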
static struct rds_iw_mr *rds_iw_alloc_mr(struct rds_iw_device *rds_iwdev)
{
	struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool;
	struct rds_iw_mr *ibmr = NULL;
	int err = 0, iter = 0;

	while (1) {
		ibmr = rds_iw_reuse_fmr(pool);
		if (ibmr)
			return ibmr;

		/* No clean MRs - now we have the choice of either
		 * allocating a fresh MR up to the limit imposed by the
		 * driver, or flushing any dirty unused MRs.
		 * We try to avoid stalling in the send path if possible,
		 * so we allocate as long as we're allowed to.
		 *
		 * We're fussy with enforcing the FMR limit, though. If the driver
		 * tells us we can't use more than N fmrs, we shouldn't start
		 * arguing with it */
		if (atomic_inc_return(&pool->item_count) <= pool->max_items)
			break;

		atomic_dec(&pool->item_count);

		if (++iter > 2) {
			rds_iw_stats_inc(s_iw_rdma_mr_pool_depleted);
			return ERR_PTR(-EAGAIN);
		}

		/* We do have some empty MRs. Flush them out. */
		rds_iw_stats_inc(s_iw_rdma_mr_pool_wait);
		rds_iw_flush_mr_pool(pool, 0);
	}

	ibmr = kzalloc(sizeof(*ibmr), GFP_KERNEL);
	if (!ibmr) {
		err = -ENOMEM;
		goto out_no_cigar;
	}

	spin_lock_init(&ibmr->mapping.m_lock);
	INIT_LIST_HEAD(&ibmr->mapping.m_list);
	ibmr->mapping.m_mr = ibmr;

	err = rds_iw_init_fastreg(pool, ibmr);
	if (err)
		goto out_no_cigar;

	rds_iw_stats_inc(s_iw_rdma_mr_alloc);
	return ibmr;

out_no_cigar:
	if (ibmr) {
		rds_iw_destroy_fastreg(pool, ibmr);
		kfree(ibmr);
	}
	atomic_dec(&pool->item_count);
	return ERR_PTR(err);
}

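/* Sync the pages behind a mapped MR for CPU or device access, per direction. */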
void rds_iw_sync_mr(void *trans_private, int direction)
{
	struct rds_iw_mr *ibmr = trans_private;
	struct rds_iw_device *rds_iwdev = ibmr->device;

	switch (direction) {
	case DMA_FROM_DEVICE:
		ib_dma_sync_sg_for_cpu(rds_iwdev->dev, ibmr->mapping.m_sg.list,
			ibmr->mapping.m_sg.dma_len, DMA_BIDIRECTIONAL);
		break;
	case DMA_TO_DEVICE:
		ib_dma_sync_sg_for_device(rds_iwdev->dev, ibmr->mapping.m_sg.list,
			ibmr->mapping.m_sg.dma_len, DMA_BIDIRECTIONAL);
		break;
	}
}

/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
{
	struct rds_iw_mr *ibmr, *next;
	LIST_HEAD(unmap_list);
	LIST_HEAD(kill_list);
	unsigned long flags;
	unsigned int nfreed = 0, ncleaned = 0, unpinned = 0;
	int ret = 0;

	rds_iw_stats_inc(s_iw_rdma_mr_pool_flush);

	mutex_lock(&pool->flush_lock);

	spin_lock_irqsave(&pool->list_lock, flags);
	/* Get the list of all mappings to be destroyed */
	list_splice_init(&pool->dirty_list, &unmap_list);
	if (free_all)
		list_splice_init(&pool->clean_list, &kill_list);
	spin_unlock_irqrestore(&pool->list_lock, flags);

	/* Batched invalidate of dirty MRs.
	 * For FMR based MRs, the mappings on the unmap list are
	 * actually members of an ibmr (ibmr->mapping). They either
	 * migrate to the kill_list, or have been cleaned and should be
	 * moved to the clean_list.
	 * For fastregs, they will be dynamically allocated, and
	 * will be destroyed by the unmap function.
	 */
	if (!list_empty(&unmap_list)) {
		ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list,
						     &kill_list, &unpinned);
		/* If we've been asked to destroy all MRs, move those
		 * that were simply cleaned to the kill list */
		if (free_all)
			list_splice_init(&unmap_list, &kill_list);
	}

	/* Destroy any MRs that are past their best before date */
	list_for_each_entry_safe(ibmr, next, &kill_list, mapping.m_list) {
		rds_iw_stats_inc(s_iw_rdma_mr_free);
		list_del(&ibmr->mapping.m_list);
		rds_iw_destroy_fastreg(pool, ibmr);
		kfree(ibmr);
		nfreed++;
	}

	/* Anything that remains are laundered ibmrs, which we can add
	 * back to the clean list. */
	if (!list_empty(&unmap_list)) {
		spin_lock_irqsave(&pool->list_lock, flags);
		list_splice(&unmap_list, &pool->clean_list);
		spin_unlock_irqrestore(&pool->list_lock, flags);
	}

	atomic_sub(unpinned, &pool->free_pinned);
	atomic_sub(ncleaned, &pool->dirty_count);
	atomic_sub(nfreed, &pool->item_count);

	mutex_unlock(&pool->flush_lock);
	return ret;
}

static void rds_iw_mr_pool_flush_worker(struct work_struct *work)
{
	struct rds_iw_mr_pool *pool = container_of(work, struct rds_iw_mr_pool, flush_worker);

	rds_iw_flush_mr_pool(pool, 0);
}

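/* Free an MR back to the pool; if requested, invalidate it right away
 * (deferring to the flush worker when called from interrupt context). */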
void rds_iw_free_mr(void *trans_private, int invalidate)
{
	struct rds_iw_mr *ibmr = trans_private;
	struct rds_iw_mr_pool *pool = ibmr->device->mr_pool;

	rdsdebug("RDS/IW: free_mr nents %u\n", ibmr->mapping.m_sg.len);
	if (!pool)
		return;

	/* Return it to the pool's free list */
	rds_iw_free_fastreg(pool, ibmr);

	/* If we've pinned too many pages, request a flush */
	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
	    atomic_read(&pool->dirty_count) >= pool->max_items / 10)
		queue_work(rds_wq, &pool->flush_worker);

	if (invalidate) {
		if (likely(!in_interrupt())) {
			rds_iw_flush_mr_pool(pool, 0);
		} else {
			/* We get here if the user created a MR marked
			 * as use_once and invalidate at the same time. */
			queue_work(rds_wq, &pool->flush_worker);
		}
	}
}

void rds_iw_flush_mrs(void)
{
	struct rds_iw_device *rds_iwdev;

	list_for_each_entry(rds_iwdev, &rds_iw_devices, list) {
		struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool;

		if (pool)
			rds_iw_flush_mr_pool(pool, 0);
	}
}

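/*
 * Register a scatterlist for RDMA (the GET_MR path): look up the device and
 * cm_id for this socket's connection, grab an MR from the pool, map the
 * pages into it and hand the r_key back through *key_ret.
 */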
void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret)
{
	struct rds_iw_device *rds_iwdev;
	struct rds_iw_mr *ibmr = NULL;
	struct rdma_cm_id *cm_id;
	struct sockaddr_in src = {
		.sin_addr.s_addr = rs->rs_bound_addr,
		.sin_port = rs->rs_bound_port,
	};
	struct sockaddr_in dst = {
		.sin_addr.s_addr = rs->rs_conn_addr,
		.sin_port = rs->rs_conn_port,
	};
	int ret;

	ret = rds_iw_get_device(&src, &dst, &rds_iwdev, &cm_id);
	if (ret || !cm_id) {
		ret = -ENODEV;
		goto out;
	}

	if (!rds_iwdev->mr_pool) {
		ret = -ENODEV;
		goto out;
	}

	ibmr = rds_iw_alloc_mr(rds_iwdev);
	if (IS_ERR(ibmr))
		return ibmr;

	ibmr->cm_id = cm_id;
	ibmr->device = rds_iwdev;

	ret = rds_iw_map_fastreg(rds_iwdev->mr_pool, ibmr, sg, nents);
	if (ret == 0)
		*key_ret = ibmr->mr->rkey;
	else
		printk(KERN_WARNING "RDS/IW: failed to map mr (errno=%d)\n", ret);

out:
	if (ret) {
		if (ibmr)
			rds_iw_free_mr(ibmr, 0);
		ibmr = ERR_PTR(ret);
	}
	return ibmr;
}

/*
 * iWARP fastreg handling
 *
 * The life cycle of a fastreg registration is a bit different from
 * FMRs.
 * The idea behind fastreg is to have one MR, to which we bind different
 * mappings over time. To avoid stalling on the expensive map and invalidate
 * operations, these operations are pipelined on the same send queue on
 * which we want to send the message containing the r_key.
 *
 * This creates a bit of a problem for us, as we do not have the destination
 * IP in GET_MR, so the connection must be set up prior to the GET_MR call
 * for RDMA to be correctly set up.  If a fastreg request is present,
 * rds_iw_xmit will try to queue a LOCAL_INV (if needed) and a FAST_REG_MR
 * work request before queuing the SEND. When completions for these arrive,
 * they are dispatched to the MR, which has a bit set showing that RDMA can
 * be performed.
 *
 * There is another interesting aspect that's related to invalidation.
 * The application can request that a mapping is invalidated in FREE_MR.
 * The expectation there is that this invalidation step includes ALL
 * PREVIOUSLY FREED MRs.
 */
static int rds_iw_init_fastreg(struct rds_iw_mr_pool *pool,
			struct rds_iw_mr *ibmr)
{
	struct rds_iw_device *rds_iwdev = pool->device;
	struct ib_fast_reg_page_list *page_list = NULL;
	struct ib_mr *mr;
	int err;

	mr = ib_alloc_mr(rds_iwdev->pd, IB_MR_TYPE_MEM_REG,
			 pool->max_message_size);
	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);

		printk(KERN_WARNING "RDS/IW: ib_alloc_mr failed (err=%d)\n", err);
		return err;
	}

	/* FIXME - this is overkill, but mapping->m_sg.dma_len/mapping->m_sg.dma_npages
	 * is not filled in.
	 */
	page_list = ib_alloc_fast_reg_page_list(rds_iwdev->dev, pool->max_message_size);
	if (IS_ERR(page_list)) {
		err = PTR_ERR(page_list);

		printk(KERN_WARNING "RDS/IW: ib_alloc_fast_reg_page_list failed (err=%d)\n", err);
		ib_dereg_mr(mr);
		return err;
	}

	ibmr->page_list = page_list;
	ibmr->mr = mr;
	return 0;
}

static int rds_iw_rdma_build_fastreg(struct rds_iw_mapping *mapping)
{
	struct rds_iw_mr *ibmr = mapping->m_mr;
	struct ib_send_wr f_wr, *failed_wr;
	int ret;

	/*
	 * Perform a WR for the fast_reg_mr. Each individual page
	 * in the sg list is added to the fast reg page list and placed
	 * inside the fast_reg_mr WR.  The key used is a rolling 8bit
	 * counter, which should guarantee uniqueness.
	 */
	ib_update_fast_reg_key(ibmr->mr, ibmr->remap_count++);
	mapping->m_rkey = ibmr->mr->rkey;

	memset(&f_wr, 0, sizeof(f_wr));
	f_wr.wr_id = RDS_IW_FAST_REG_WR_ID;
	f_wr.opcode = IB_WR_FAST_REG_MR;
	f_wr.wr.fast_reg.length = mapping->m_sg.bytes;
	f_wr.wr.fast_reg.rkey = mapping->m_rkey;
	f_wr.wr.fast_reg.page_list = ibmr->page_list;
	f_wr.wr.fast_reg.page_list_len = mapping->m_sg.dma_len;
	f_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
	f_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE |
				IB_ACCESS_REMOTE_READ |
				IB_ACCESS_REMOTE_WRITE;
	f_wr.wr.fast_reg.iova_start = 0;
	f_wr.send_flags = IB_SEND_SIGNALED;

	failed_wr = &f_wr;
	ret = ib_post_send(ibmr->cm_id->qp, &f_wr, &failed_wr);
	BUG_ON(failed_wr != &f_wr);
	if (ret)
		printk_ratelimited(KERN_WARNING "RDS/IW: %s:%d ib_post_send returned %d\n",
				   __func__, __LINE__, ret);
	return ret;
}

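/* Post a LOCAL_INV work request to invalidate the MR's current rkey. */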
static int rds_iw_rdma_fastreg_inv(struct rds_iw_mr *ibmr)
{
	struct ib_send_wr s_wr, *failed_wr;
	int ret = 0;

	if (!ibmr->cm_id->qp || !ibmr->mr)
		goto out;

	memset(&s_wr, 0, sizeof(s_wr));
	s_wr.wr_id = RDS_IW_LOCAL_INV_WR_ID;
	s_wr.opcode = IB_WR_LOCAL_INV;
	s_wr.ex.invalidate_rkey = ibmr->mr->rkey;
	s_wr.send_flags = IB_SEND_SIGNALED;

	failed_wr = &s_wr;
	ret = ib_post_send(ibmr->cm_id->qp, &s_wr, &failed_wr);
	if (ret) {
		printk_ratelimited(KERN_WARNING "RDS/IW: %s:%d ib_post_send returned %d\n",
				   __func__, __LINE__, ret);
		goto out;
	}
out:
	return ret;
}

static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool,
			struct rds_iw_mr *ibmr,
			struct scatterlist *sg,
			unsigned int sg_len)
{
	struct rds_iw_device *rds_iwdev = pool->device;
	struct rds_iw_mapping *mapping = &ibmr->mapping;
	u64 *dma_pages;
	int i, ret = 0;

	rds_iw_set_scatterlist(&mapping->m_sg, sg, sg_len);

	dma_pages = rds_iw_map_scatterlist(rds_iwdev, &mapping->m_sg);
	if (IS_ERR(dma_pages)) {
		ret = PTR_ERR(dma_pages);
		dma_pages = NULL;
		goto out;
	}

	if (mapping->m_sg.dma_len > pool->max_message_size) {
		ret = -EMSGSIZE;
		goto out;
	}

	for (i = 0; i < mapping->m_sg.dma_npages; ++i)
		ibmr->page_list->page_list[i] = dma_pages[i];

	ret = rds_iw_rdma_build_fastreg(mapping);
	if (ret)
		goto out;

	rds_iw_stats_inc(s_iw_rdma_mr_used);

out:
	kfree(dma_pages);

	return ret;
}

/*
 * "Free" a fastreg MR.
 */
static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool,
		struct rds_iw_mr *ibmr)
{
	unsigned long flags;
	int ret;

	if (!ibmr->mapping.m_sg.dma_len)
		return;

	ret = rds_iw_rdma_fastreg_inv(ibmr);
	if (ret)
		return;

	/* Try to post the LOCAL_INV WR to the queue. */
	spin_lock_irqsave(&pool->list_lock, flags);

	list_add_tail(&ibmr->mapping.m_list, &pool->dirty_list);
	atomic_add(ibmr->mapping.m_sg.len, &pool->free_pinned);
	atomic_inc(&pool->dirty_count);

	spin_unlock_irqrestore(&pool->list_lock, flags);
}

static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
				struct list_head *unmap_list,
				struct list_head *kill_list,
				int *unpinned)
{
	struct rds_iw_mapping *mapping, *next;
	unsigned int ncleaned = 0;
	LIST_HEAD(laundered);

	/* Batched invalidation of fastreg MRs.
	 * Why do we do it this way, even though we could pipeline unmap
	 * and remap? The reason is the application semantics - when the
	 * application requests an invalidation of MRs, it expects all
	 * previously released R_Keys to become invalid.
	 *
	 * If we implement MR reuse naively, we risk memory corruption
	 * (this has actually been observed). So the default behavior
	 * requires that a MR goes through an explicit unmap operation before
	 * we can reuse it again.
	 *
	 * We could probably improve on this a little, by allowing immediate
	 * reuse of a MR on the same socket (e.g. you could add a small
	 * cache of unused MRs to struct rds_sock - GET_MR could grab one
	 * of these without requiring an explicit invalidate).
	 */
	while (!list_empty(unmap_list)) {
		unsigned long flags;

		spin_lock_irqsave(&pool->list_lock, flags);
		list_for_each_entry_safe(mapping, next, unmap_list, m_list) {
			*unpinned += mapping->m_sg.len;
			list_move(&mapping->m_list, &laundered);
			ncleaned++;
		}
		spin_unlock_irqrestore(&pool->list_lock, flags);
	}

	/* Move all laundered mappings back to the unmap list.
	 * We do not kill any WRs right now - it doesn't seem the
	 * fastreg API has a max_remap limit. */
	list_splice_init(&laundered, unmap_list);

	return ncleaned;
}

static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool,
		struct rds_iw_mr *ibmr)
{
	if (ibmr->page_list)
		ib_free_fast_reg_page_list(ibmr->page_list);
	if (ibmr->mr)
		ib_dereg_mr(ibmr->mr);
}