/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <linux/kthread.h>

#include <rdma/ib_fmr_pool.h>

#include "core_priv.h"

#define PFX "fmr_pool: "

enum {
	IB_FMR_MAX_REMAPS = 32,

	IB_FMR_HASH_BITS  = 8,
	IB_FMR_HASH_SIZE  = 1 << IB_FMR_HASH_BITS,
	IB_FMR_HASH_MASK  = IB_FMR_HASH_SIZE - 1
};

/*
 * If an FMR is not in use, then the list member will point to either
 * its pool's free_list (if the FMR can be mapped again; that is,
 * remap_count < pool->max_remaps) or its pool's dirty_list (if the
 * FMR needs to be unmapped before being remapped).  In either of
 * these cases it is a bug if the ref_count is not 0.  In other words,
 * if ref_count is > 0, then the list member must not be linked into
 * either free_list or dirty_list.
 *
 * The cache_node member is used to link the FMR into a cache bucket
 * (if caching is enabled).  This is independent of the reference
 * count of the FMR.  When a valid FMR is released, its ref_count is
 * decremented, and if ref_count reaches 0, the FMR is placed in
 * either free_list or dirty_list as appropriate.  However, it is not
 * removed from the cache and may be "revived" if a call to
 * ib_fmr_pool_map_phys() hits in the cache before the FMR is
 * remapped.  In this case we just increment the ref_count and remove
 * the FMR from free_list/dirty_list.
 *
 * Before we remap an FMR from free_list, we remove it from the cache
 * (to prevent another user from obtaining a stale FMR).  When an FMR
 * is released, we add it to the tail of the free list, so that our
 * cache eviction policy is "least recently used."
 *
 * All manipulation of ref_count, list and cache_node is protected by
 * pool_lock to maintain consistency.
 */

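/*
 * A minimal consumer-side sketch of the lifecycle described above
 * (hypothetical code, not part of this file; it assumes a pool
 * created with ib_create_fmr_pool() and a populated page_list):
 *
 *	struct ib_pool_fmr *pfmr;
 *
 *	pfmr = ib_fmr_pool_map_phys(pool, page_list, npages, io_addr);
 *	if (IS_ERR(pfmr))
 *		return PTR_ERR(pfmr);
 *
 *	... post RDMA work requests using pfmr->fmr->lkey/rkey ...
 *
 *	ib_fmr_pool_unmap(pfmr);
 *
 * While the mapping is held, ref_count is positive and the FMR sits
 * only in the cache; after the unmap it returns to free_list (or to
 * dirty_list once remap_count reaches pool->max_remaps).
 */
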
struct ib_fmr_pool {
	spinlock_t                pool_lock;

	int                       pool_size;
	int                       max_pages;
	int                       max_remaps;
	int                       dirty_watermark;
	int                       dirty_len;
	struct list_head          free_list;
	struct list_head          dirty_list;
	struct hlist_head        *cache_bucket;

	void                     (*flush_function)(struct ib_fmr_pool *pool,
						   void               *arg);
	void                     *flush_arg;

	struct task_struct       *thread;

	atomic_t                  req_ser;
	atomic_t                  flush_ser;

	wait_queue_head_t         force_wait;
};

static inline u32 ib_fmr_hash(u64 first_page)
{
	return jhash_2words((u32) first_page, (u32) (first_page >> 32), 0) &
		IB_FMR_HASH_MASK;
}

/* Caller must hold pool_lock */
static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
						      u64 *page_list,
						      int  page_list_len,
						      u64  io_virtual_address)
{
	struct hlist_head *bucket;
	struct ib_pool_fmr *fmr;
	struct hlist_node *pos;

	if (!pool->cache_bucket)
		return NULL;

	bucket = pool->cache_bucket + ib_fmr_hash(*page_list);

	hlist_for_each_entry(fmr, pos, bucket, cache_node)
		if (io_virtual_address == fmr->io_virtual_address &&
		    page_list_len == fmr->page_list_len &&
		    !memcmp(page_list, fmr->page_list,
			    page_list_len * sizeof *page_list))
			return fmr;

	return NULL;
}

static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
{
	int                 ret;
	struct ib_pool_fmr *fmr;
	LIST_HEAD(unmap_list);
	LIST_HEAD(fmr_list);

	spin_lock_irq(&pool->pool_lock);

	list_for_each_entry(fmr, &pool->dirty_list, list) {
		hlist_del_init(&fmr->cache_node);
		fmr->remap_count = 0;
		list_add_tail(&fmr->fmr->list, &fmr_list);

#ifdef DEBUG
		if (fmr->ref_count != 0) {
			printk(KERN_WARNING PFX "Unmapping FMR %p with ref count %d\n",
			       fmr, fmr->ref_count);
		}
#endif
	}

	list_splice_init(&pool->dirty_list, &unmap_list);
	pool->dirty_len = 0;

	spin_unlock_irq(&pool->pool_lock);

	if (list_empty(&unmap_list))
		return;

	ret = ib_unmap_fmr(&fmr_list);
	if (ret)
		printk(KERN_WARNING PFX "ib_unmap_fmr returned %d\n", ret);

	spin_lock_irq(&pool->pool_lock);
	list_splice(&unmap_list, &pool->free_list);
	spin_unlock_irq(&pool->pool_lock);
}

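/*
 * The cleanup thread and ib_flush_fmr_pool() synchronize through two
 * serial counters: req_ser is incremented by anyone who wants a
 * flush, and flush_ser is incremented by the thread after each
 * completed batch release.  The thread keeps running as long as
 * flush_ser lags behind req_ser; flush callers sleep on force_wait
 * until flush_ser catches up with the serial number they took.
 */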
static int ib_fmr_cleanup_thread(void *pool_ptr)
{
	struct ib_fmr_pool *pool = pool_ptr;

	do {
		if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
			ib_fmr_batch_release(pool);

			atomic_inc(&pool->flush_ser);
			wake_up_interruptible(&pool->force_wait);

			if (pool->flush_function)
				pool->flush_function(pool, pool->flush_arg);
		}

		set_current_state(TASK_INTERRUPTIBLE);
		if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
		    !kthread_should_stop())
			schedule();
		__set_current_state(TASK_RUNNING);
	} while (!kthread_should_stop());

	return 0;
}

/**
 * ib_create_fmr_pool - Create an FMR pool
 * @pd: Protection domain for FMRs
 * @params: FMR pool parameters
 *
 * Create a pool of FMRs.  Return value is pointer to new pool or
 * error code if creation failed.
 */
struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd             *pd,
				       struct ib_fmr_pool_param *params)
{
	struct ib_device      *device;
	struct ib_fmr_pool    *pool;
	struct ib_device_attr *attr;
	int i;
	int ret;
	int max_remaps;

	if (!params)
		return ERR_PTR(-EINVAL);

	device = pd->device;
	if (!device->alloc_fmr    || !device->dealloc_fmr  ||
	    !device->map_phys_fmr || !device->unmap_fmr) {
		printk(KERN_INFO PFX "Device %s does not support FMRs\n",
		       device->name);
		return ERR_PTR(-ENOSYS);
	}

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr) {
		printk(KERN_WARNING PFX "couldn't allocate device attr struct\n");
		return ERR_PTR(-ENOMEM);
	}

	ret = ib_query_device(device, attr);
	if (ret) {
		printk(KERN_WARNING PFX "couldn't query device: %d\n", ret);
		kfree(attr);
		return ERR_PTR(ret);
	}

	if (!attr->max_map_per_fmr)
		max_remaps = IB_FMR_MAX_REMAPS;
	else
		max_remaps = attr->max_map_per_fmr;

	kfree(attr);

	pool = kmalloc(sizeof *pool, GFP_KERNEL);
	if (!pool) {
		printk(KERN_WARNING PFX "couldn't allocate pool struct\n");
		return ERR_PTR(-ENOMEM);
	}

	pool->cache_bucket   = NULL;

	pool->flush_function = params->flush_function;
	pool->flush_arg      = params->flush_arg;

	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->dirty_list);

	if (params->cache) {
		pool->cache_bucket =
			kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket,
				GFP_KERNEL);
		if (!pool->cache_bucket) {
			printk(KERN_WARNING PFX "Failed to allocate cache in pool\n");
			ret = -ENOMEM;
			goto out_free_pool;
		}

		for (i = 0; i < IB_FMR_HASH_SIZE; ++i)
			INIT_HLIST_HEAD(pool->cache_bucket + i);
	}

	pool->pool_size       = 0;
	pool->max_pages       = params->max_pages_per_fmr;
	pool->max_remaps      = max_remaps;
	pool->dirty_watermark = params->dirty_watermark;
	pool->dirty_len       = 0;
	spin_lock_init(&pool->pool_lock);
	atomic_set(&pool->req_ser,   0);
	atomic_set(&pool->flush_ser, 0);
	init_waitqueue_head(&pool->force_wait);

	pool->thread = kthread_run(ib_fmr_cleanup_thread,
				   pool,
				   "ib_fmr(%s)",
				   device->name);
	if (IS_ERR(pool->thread)) {
		printk(KERN_WARNING PFX "couldn't start cleanup thread\n");
		ret = PTR_ERR(pool->thread);
		goto out_free_pool;
	}

	{
		struct ib_pool_fmr *fmr;
		struct ib_fmr_attr fmr_attr = {
			.max_pages  = params->max_pages_per_fmr,
			.max_maps   = pool->max_remaps,
			.page_shift = params->page_shift
		};
		int bytes_per_fmr = sizeof *fmr;

		if (pool->cache_bucket)
			bytes_per_fmr += params->max_pages_per_fmr * sizeof (u64);

		for (i = 0; i < params->pool_size; ++i) {
			fmr = kmalloc(bytes_per_fmr, GFP_KERNEL);
			if (!fmr) {
				printk(KERN_WARNING PFX "failed to allocate fmr struct for FMR %d\n",
				       i);
				goto out_fail;
			}

			fmr->pool        = pool;
			fmr->remap_count = 0;
			fmr->ref_count   = 0;
			INIT_HLIST_NODE(&fmr->cache_node);

			fmr->fmr = ib_alloc_fmr(pd, params->access, &fmr_attr);
			if (IS_ERR(fmr->fmr)) {
				printk(KERN_WARNING PFX "fmr_create failed for FMR %d\n",
				       i);
				kfree(fmr);
				goto out_fail;
			}

			list_add_tail(&fmr->list, &pool->free_list);
			++pool->pool_size;
		}
	}

	return pool;

 out_free_pool:
	kfree(pool->cache_bucket);
	kfree(pool);

	return ERR_PTR(ret);

 out_fail:
	ib_destroy_fmr_pool(pool);

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_fmr_pool);
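
/*
 * For illustration, a hedged sketch of typical pool creation by a
 * consumer (hypothetical values; the field names come from
 * struct ib_fmr_pool_param in <rdma/ib_fmr_pool.h>):
 *
 *	struct ib_fmr_pool_param params = {
 *		.max_pages_per_fmr = 64,
 *		.page_shift        = PAGE_SHIFT,
 *		.access            = IB_ACCESS_LOCAL_WRITE |
 *				     IB_ACCESS_REMOTE_READ |
 *				     IB_ACCESS_REMOTE_WRITE,
 *		.pool_size         = 1024,
 *		.dirty_watermark   = 32,
 *		.cache             = 1,
 *	};
 *	struct ib_fmr_pool *pool;
 *
 *	pool = ib_create_fmr_pool(pd, &params);
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */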

/**
 * ib_destroy_fmr_pool - Free FMR pool
 * @pool: FMR pool to free
 *
 * Destroy an FMR pool and free all associated resources.
 */
void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
{
	struct ib_pool_fmr *fmr;
	struct ib_pool_fmr *tmp;
	LIST_HEAD(fmr_list);
	int                 i;

	kthread_stop(pool->thread);
	ib_fmr_batch_release(pool);

	i = 0;
	list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
		if (fmr->remap_count) {
			INIT_LIST_HEAD(&fmr_list);
			list_add_tail(&fmr->fmr->list, &fmr_list);
			ib_unmap_fmr(&fmr_list);
		}
		ib_dealloc_fmr(fmr->fmr);
		list_del(&fmr->list);
		kfree(fmr);
		++i;
	}

	if (i < pool->pool_size)
		printk(KERN_WARNING PFX "pool still has %d regions registered\n",
		       pool->pool_size - i);

	kfree(pool->cache_bucket);
	kfree(pool);
}
EXPORT_SYMBOL(ib_destroy_fmr_pool);

/**
 * ib_flush_fmr_pool - Invalidate all unmapped FMRs
 * @pool: FMR pool to flush
 *
 * Ensure that all unmapped FMRs are fully invalidated.
 */
int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
{
	int serial;
	struct ib_pool_fmr *fmr, *next;

	/*
	 * The free_list holds FMRs that may have been used
	 * but have not been remapped enough times to be dirty.
	 * Put them on the dirty list now so that the cleanup
	 * thread will reap them too.
	 */
	spin_lock_irq(&pool->pool_lock);
	list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
		if (fmr->remap_count > 0)
			list_move(&fmr->list, &pool->dirty_list);
	}
	spin_unlock_irq(&pool->pool_lock);

	serial = atomic_inc_return(&pool->req_ser);
	wake_up_process(pool->thread);

	if (wait_event_interruptible(pool->force_wait,
				     atomic_read(&pool->flush_ser) - serial >= 0))
		return -EINTR;

	return 0;
}
EXPORT_SYMBOL(ib_flush_fmr_pool);
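
/*
 * Hypothetical use of the flush interface (not part of this file): a
 * consumer that must guarantee no stale mapping survives, say before
 * handing pages back to another subsystem, can force the cleanup
 * thread to reap every unmapped FMR and wait for it to finish:
 *
 *	ib_fmr_pool_unmap(pfmr);
 *	ret = ib_flush_fmr_pool(pool);
 *	if (ret)
 *		return ret;	(-EINTR if interrupted by a signal)
 */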

/**
 * ib_fmr_pool_map_phys - Map an FMR from an FMR pool
 * @pool_handle: FMR pool to allocate FMR from
 * @page_list: List of pages to map
 * @list_len: Number of pages in @page_list
 * @io_virtual_address: I/O virtual address for new FMR
 *
 * Map an FMR from an FMR pool.
 */
struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
					 u64                *page_list,
					 int                 list_len,
					 u64                 io_virtual_address)
{
	struct ib_fmr_pool *pool = pool_handle;
	struct ib_pool_fmr *fmr;
	unsigned long       flags;
	int                 result;

	if (list_len < 1 || list_len > pool->max_pages)
		return ERR_PTR(-EINVAL);

	spin_lock_irqsave(&pool->pool_lock, flags);
	fmr = ib_fmr_cache_lookup(pool,
				  page_list,
				  list_len,
				  io_virtual_address);
	if (fmr) {
		/* found in cache */
		++fmr->ref_count;
		if (fmr->ref_count == 1)
			list_del(&fmr->list);

		spin_unlock_irqrestore(&pool->pool_lock, flags);

		return fmr;
	}

	if (list_empty(&pool->free_list)) {
		spin_unlock_irqrestore(&pool->pool_lock, flags);
		return ERR_PTR(-EAGAIN);
	}

	fmr = list_entry(pool->free_list.next, struct ib_pool_fmr, list);
	list_del(&fmr->list);
	hlist_del_init(&fmr->cache_node);
	spin_unlock_irqrestore(&pool->pool_lock, flags);

	result = ib_map_phys_fmr(fmr->fmr, page_list, list_len,
				 io_virtual_address);

	if (result) {
		spin_lock_irqsave(&pool->pool_lock, flags);
		list_add(&fmr->list, &pool->free_list);
		spin_unlock_irqrestore(&pool->pool_lock, flags);

		printk(KERN_WARNING PFX "fmr_map returns %d\n", result);

		return ERR_PTR(result);
	}

	++fmr->remap_count;
	fmr->ref_count = 1;

	if (pool->cache_bucket) {
		fmr->io_virtual_address = io_virtual_address;
		fmr->page_list_len      = list_len;
		memcpy(fmr->page_list, page_list, list_len * sizeof(*page_list));

		spin_lock_irqsave(&pool->pool_lock, flags);
		hlist_add_head(&fmr->cache_node,
			       pool->cache_bucket + ib_fmr_hash(fmr->page_list[0]));
		spin_unlock_irqrestore(&pool->pool_lock, flags);
	}

	return fmr;
}
EXPORT_SYMBOL(ib_fmr_pool_map_phys);

/**
 * ib_fmr_pool_unmap - Unmap FMR
 * @fmr: FMR to unmap
 *
 * Unmap an FMR.  The FMR mapping may remain valid until the FMR is
 * reused (or until ib_flush_fmr_pool() is called).
 */
int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
{
	struct ib_fmr_pool *pool;
	unsigned long flags;

	pool = fmr->pool;

	spin_lock_irqsave(&pool->pool_lock, flags);

	--fmr->ref_count;
	if (!fmr->ref_count) {
		if (fmr->remap_count < pool->max_remaps) {
			list_add_tail(&fmr->list, &pool->free_list);
		} else {
			list_add_tail(&fmr->list, &pool->dirty_list);
			if (++pool->dirty_len >= pool->dirty_watermark) {
				atomic_inc(&pool->req_ser);
				wake_up_process(pool->thread);
			}
		}
	}

#ifdef DEBUG
	if (fmr->ref_count < 0)
		printk(KERN_WARNING PFX "FMR %p has ref count %d < 0\n",
		       fmr, fmr->ref_count);
#endif

	spin_unlock_irqrestore(&pool->pool_lock, flags);

	return 0;
}
EXPORT_SYMBOL(ib_fmr_pool_unmap);