/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: iser_memory.c 6964 2006-05-07 11:11:43Z ogerlitz $
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/io.h>
#include <asm/scatterlist.h>
#include <linux/scatterlist.h>

#include "iscsi_iser.h"
#define ISER_KMALLOC_THRESHOLD 0x20000 /* 128K - kmalloc limit */
/**
 * iser_regd_buff_release - decrements the reference count for the
 * registered buffer and releases it once the count reaches zero
 *
 * returns 0 if released, 1 if deferred
 */
int iser_regd_buff_release(struct iser_regd_buf *regd_buf)
{
	struct device *dma_device;

	if ((atomic_read(&regd_buf->ref_count) == 0) ||
	    atomic_dec_and_test(&regd_buf->ref_count)) {
		/* if we used the dma mr, unreg is just NOP */
		if (regd_buf->reg.is_fmr)
			iser_unreg_mem(&regd_buf->reg);

		if (regd_buf->dma_addr) {
			dma_device = regd_buf->device->ib_device->dma_device;
			dma_unmap_single(dma_device,
					 regd_buf->dma_addr,
					 regd_buf->data_size,
					 regd_buf->direction);
		}
		/* else this regd buf is associated with a task on which */
		/* we call dma_unmap_single/sg later                     */
		return 0;
	} else {
		iser_dbg("Release deferred, regd.buff: 0x%p\n", regd_buf);
		return 1;
	}
}
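
/*
 * Illustrative note (an assumption about callers, not from this file):
 * a typical usage pattern is to take a reference before posting a send
 * that carries the buffer, and to call iser_regd_buff_release() from
 * both the completion path and the response path; whichever call runs
 * last (via atomic_dec_and_test) performs the actual unmap/unregister:
 *
 *	atomic_inc(&regd_buf->ref_count);
 *	...post send, later receive the SCSI response...
 *	if (iser_regd_buff_release(regd_buf))
 *		iser_dbg("release deferred to remaining ref holder\n");
 */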
/**
 * iser_reg_single - fills a registered buffer descriptor with
 * registration information
 */
void iser_reg_single(struct iser_device *device,
		     struct iser_regd_buf *regd_buf,
		     enum dma_data_direction direction)
{
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(device->ib_device->dma_device,
				  regd_buf->virt_addr,
				  regd_buf->data_size, direction);
	BUG_ON(dma_mapping_error(dma_addr));

	regd_buf->reg.lkey   = device->mr->lkey;
	regd_buf->reg.len    = regd_buf->data_size;
	regd_buf->reg.va     = dma_addr;
	regd_buf->reg.is_fmr = 0;

	regd_buf->dma_addr  = dma_addr;
	regd_buf->direction = direction;
}
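
/*
 * Illustrative note (not in the original source): buffers registered
 * here use the device-wide DMA memory region (device->mr), which is why
 * reg.is_fmr is 0; on release, iser_regd_buff_release() then only needs
 * to dma_unmap_single() the buffer, with no per-buffer unregistration.
 */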
/**
 * iser_start_rdma_unaligned_sg - allocates a copy (bounce) buffer for a
 * task whose scatterlist is not aligned for RDMA, copies the data into
 * it for writes, and DMA-maps the single-entry copy
 */
int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
				 enum iser_data_dir cmd_dir)
{
	int dma_nents;
	struct device *dma_device;
	char *mem = NULL;
	struct iser_data_buf *data = &iser_ctask->data[cmd_dir];
	unsigned long cmd_data_len = data->data_len;

	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
		mem = (void *)__get_free_pages(GFP_NOIO,
		      long_log2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
	else
		mem = kmalloc(cmd_data_len, GFP_NOIO);

	if (mem == NULL) {
		iser_err("Failed to allocate mem size %d %d for copying sglist\n",
			 data->size, (int)cmd_data_len);
		return -ENOMEM;
	}

	if (cmd_dir == ISER_DIR_OUT) {
		/* copy the unaligned sg into the buffer which is used for RDMA */
		struct scatterlist *sg = (struct scatterlist *)data->buf;
		char *p, *from;
		int i;

		for (p = mem, i = 0; i < data->size; i++) {
			from = kmap_atomic(sg[i].page, KM_USER0);
			memcpy(p,
			       from + sg[i].offset,
			       sg[i].length);
			kunmap_atomic(from, KM_USER0);
			p += sg[i].length;
		}
	}

	sg_init_one(&iser_ctask->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
	iser_ctask->data_copy[cmd_dir].buf  =
		&iser_ctask->data_copy[cmd_dir].sg_single;
	iser_ctask->data_copy[cmd_dir].size = 1;

	iser_ctask->data_copy[cmd_dir].copy_buf = mem;

	dma_device = iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;

	if (cmd_dir == ISER_DIR_OUT)
		dma_nents = dma_map_sg(dma_device,
				       &iser_ctask->data_copy[cmd_dir].sg_single,
				       1, DMA_TO_DEVICE);
	else
		dma_nents = dma_map_sg(dma_device,
				       &iser_ctask->data_copy[cmd_dir].sg_single,
				       1, DMA_FROM_DEVICE);

	BUG_ON(dma_nents == 0);

	iser_ctask->data_copy[cmd_dir].dma_nents = dma_nents;
	return 0;
}
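
/*
 * Illustrative note (worked example, not in the original source): for
 * allocations above ISER_KMALLOC_THRESHOLD the page order is computed as
 * long_log2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT. With 4K
 * pages (PAGE_SHIFT = 12) and cmd_data_len = 200K:
 *
 *	roundup_pow_of_two(0x32000) = 0x40000	(256K)
 *	long_log2(0x40000)          = 18
 *	order                       = 18 - 12 = 6	(64 pages = 256K)
 *
 * so the bounce buffer is over-allocated up to the next power of two,
 * and the exact same expression must be used when freeing it with
 * free_pages() in iser_finalize_rdma_unaligned_sg().
 */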
/**
 * iser_finalize_rdma_unaligned_sg - unmaps the copy (bounce) buffer,
 * copies read data back to the original unaligned scatterlist, and
 * frees the copy buffer
 */
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
				     enum iser_data_dir cmd_dir)
{
	struct device *dma_device;
	struct iser_data_buf *mem_copy;
	unsigned long cmd_data_len;

	dma_device = iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
	mem_copy   = &iser_ctask->data_copy[cmd_dir];

	if (cmd_dir == ISER_DIR_OUT)
		dma_unmap_sg(dma_device, &mem_copy->sg_single, 1,
			     DMA_TO_DEVICE);
	else
		dma_unmap_sg(dma_device, &mem_copy->sg_single, 1,
			     DMA_FROM_DEVICE);

	if (cmd_dir == ISER_DIR_IN) {
		char *mem, *p, *to;
		struct scatterlist *sg;
		unsigned int sg_size;
		int i;

		/* copy back read RDMA to unaligned sg */
		mem = mem_copy->copy_buf;

		sg      = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf;
		sg_size = iser_ctask->data[ISER_DIR_IN].size;

		for (p = mem, i = 0; i < sg_size; i++) {
			to = kmap_atomic(sg[i].page, KM_SOFTIRQ0);
			memcpy(to + sg[i].offset,
			       p,
			       sg[i].length);
			kunmap_atomic(to, KM_SOFTIRQ0);
			p += sg[i].length;
		}
	}

	cmd_data_len = iser_ctask->data[cmd_dir].data_len;

	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
		free_pages((unsigned long)mem_copy->copy_buf,
			   long_log2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
	else
		kfree(mem_copy->copy_buf);

	mem_copy->copy_buf = NULL;
}
/**
 * iser_sg_to_page_vec - Translates scatterlist entries to physical addresses
 * and returns the length of the resulting physical address array (which may
 * be less than the original due to possible compaction).
 *
 * We build a "page vec" under the assumption that the SG meets the RDMA
 * alignment requirements. Other than the first and last SG elements, all
 * the "internal" elements can be compacted into a list whose elements are
 * dma addresses of physical pages. The code also supports the weird case
 * where several fragments of the same page are present in the SG as
 * consecutive elements, and it handles a one entry SG.
 */
static int iser_sg_to_page_vec(struct iser_data_buf *data,
			       struct iser_page_vec *page_vec)
{
	struct scatterlist *sg = (struct scatterlist *)data->buf;
	dma_addr_t first_addr, last_addr, page;
	int start_aligned, end_aligned;
	unsigned int cur_page = 0;
	unsigned long total_sz = 0;
	int i;

	/* compute the offset of first element */
	page_vec->offset = (u64) sg[0].offset & ~MASK_4K;

	for (i = 0; i < data->dma_nents; i++) {
		total_sz += sg_dma_len(&sg[i]);

		first_addr = sg_dma_address(&sg[i]);
		last_addr  = first_addr + sg_dma_len(&sg[i]);

		start_aligned = !(first_addr & ~MASK_4K);
		end_aligned   = !(last_addr  & ~MASK_4K);

		/* continue to collect page fragments till aligned or SG ends */
		while (!end_aligned && (i + 1 < data->dma_nents)) {
			i++;
			total_sz   += sg_dma_len(&sg[i]);
			last_addr   = sg_dma_address(&sg[i]) + sg_dma_len(&sg[i]);
			end_aligned = !(last_addr & ~MASK_4K);
		}

		/* handle the 1st page in the 1st DMA element */
		if (cur_page == 0) {
			page = first_addr & MASK_4K;
			page_vec->pages[cur_page] = page;
			cur_page++;
			page += SIZE_4K;
		} else
			page = first_addr;

		for (; page < last_addr; page += SIZE_4K) {
			page_vec->pages[cur_page] = page;
			cur_page++;
		}
	}
	page_vec->data_size = total_sz;
	iser_dbg("page_vec->data_size:%d cur_page %d\n",
		 page_vec->data_size, cur_page);
	return cur_page;
}
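
/*
 * Illustrative example (not in the original source) of the compaction
 * performed above. Given a DMA-mapped SG of three elements:
 *
 *	sg[0]: addr 0x10000800 len 0x0800	(ends 4K aligned)
 *	sg[1]: addr 0x20000000 len 0x2000	(starts and ends 4K aligned)
 *	sg[2]: addr 0x30000000 len 0x0400	(unaligned tail fragment)
 *
 * the resulting page vec has offset = 0x800, data_size = 0x2C00 and
 * pages[] = { 0x10000000, 0x20000000, 0x20001000, 0x30000000 }: the
 * first entry keeps its intra-page offset, internal elements are split
 * into whole 4K pages, and the last element may end unaligned.
 */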
#define IS_4K_ALIGNED(addr)	((((unsigned long)addr) & ~MASK_4K) == 0)
/**
 * iser_data_buf_aligned_len - Tries to determine the maximal sub-list of a
 * scatter-gather list of memory buffers which is correctly aligned for RDMA,
 * and returns the number of entries which are aligned correctly. Supports
 * the case where consecutive SG elements are actually fragments of the same
 * physical page.
 */
static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data)
{
	struct scatterlist *sg;
	dma_addr_t end_addr, next_addr;
	int i, cnt;
	unsigned int ret_len = 0;

	sg = (struct scatterlist *)data->buf;

	for (cnt = 0, i = 0; i < data->dma_nents; i++, cnt++) {
		/* iser_dbg("Checking sg iobuf [%d]: phys=0x%08lX "
		   "offset: %ld sz: %ld\n", i,
		   (unsigned long)page_to_phys(sg[i].page),
		   (unsigned long)sg[i].offset,
		   (unsigned long)sg[i].length); */
		end_addr = sg_dma_address(&sg[i]) +
			   sg_dma_len(&sg[i]);
		/* iser_dbg("Checking sg iobuf end address "
		   "0x%08lX\n", end_addr); */
		if (i + 1 < data->dma_nents) {
			next_addr = sg_dma_address(&sg[i+1]);
			/* are i, i+1 fragments of the same page? */
			if (end_addr == next_addr)
				continue;
			else if (!IS_4K_ALIGNED(end_addr)) {
				ret_len = cnt + 1;
				break;
			}
		}
	}
	if (i == data->dma_nents)
		ret_len = cnt;	/* loop ended */
	iser_dbg("Found %d aligned entries out of %d in sg:0x%p\n",
		 ret_len, data->dma_nents, data);
	return ret_len;
}
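
/*
 * Illustrative example (not in the original source): an SG is usable for
 * a single FMR only if each boundary between consecutive elements falls
 * on a 4K boundary, or the two elements abut within the same page. E.g.:
 *
 *	sg[0] 0x10000800..0x10001000	ok: ends 4K aligned
 *	sg[1] 0x20000000..0x20000C00	breaks alignment: ends mid-page
 *	sg[2] 0x30000000..		while the next element starts elsewhere
 *
 * here iser_data_buf_aligned_len() returns 2 while dma_nents is 3, so
 * iser_reg_rdma_mem() below falls back to the bounce buffer path.
 */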
static void iser_data_buf_dump(struct iser_data_buf *data)
{
	struct scatterlist *sg = (struct scatterlist *)data->buf;
	int i;

	for (i = 0; i < data->dma_nents; i++)
		iser_err("sg[%d] dma_addr:0x%lX page:0x%p "
			 "off:0x%x sz:0x%x dma_len:0x%x\n",
			 i, (unsigned long)sg_dma_address(&sg[i]),
			 sg[i].page, sg[i].offset,
			 sg[i].length, sg_dma_len(&sg[i]));
}
static void iser_dump_page_vec(struct iser_page_vec *page_vec)
{
	int i;

	iser_err("page vec length %d data size %d\n",
		 page_vec->length, page_vec->data_size);
	for (i = 0; i < page_vec->length; i++)
		iser_err("%d %lx\n", i, (unsigned long)page_vec->pages[i]);
}
static void iser_page_vec_build(struct iser_data_buf *data,
				struct iser_page_vec *page_vec)
{
	int page_vec_len = 0;

	page_vec->length = 0;
	page_vec->offset = 0;

	iser_dbg("Translating sg sz: %d\n", data->dma_nents);
	page_vec_len = iser_sg_to_page_vec(data, page_vec);
	iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents, page_vec_len);

	page_vec->length = page_vec_len;

	if (page_vec_len * SIZE_4K < page_vec->data_size) {
		iser_err("page_vec too short to hold this SG\n");
		iser_data_buf_dump(data);
		iser_dump_page_vec(page_vec);
		BUG();
	}
}
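
/*
 * Illustrative note (not in the original source): the check above is a
 * sanity bound. Every byte counted in data_size lies inside one of the
 * 4K pages emitted into pages[], so a vec of length N can never describe
 * more than N * 4K bytes; e.g. page_vec_len 3 with data_size 0x4000
 * (16K) would mean iser_sg_to_page_vec() dropped a page, hence BUG().
 */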
int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
			   struct iser_data_buf *data,
			   enum iser_data_dir iser_dir,
			   enum dma_data_direction dma_dir)
{
	struct device *dma_device;

	iser_ctask->dir[iser_dir] = 1;
	dma_device =
		iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;

	data->dma_nents = dma_map_sg(dma_device, data->buf, data->size, dma_dir);
	if (data->dma_nents == 0) {
		iser_err("dma_map_sg failed!!!\n");
		return -EINVAL;
	}
	return 0;
}
void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
{
	struct device *dma_device;
	struct iser_data_buf *data;

	dma_device =
		iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;

	if (iser_ctask->dir[ISER_DIR_IN]) {
		data = &iser_ctask->data[ISER_DIR_IN];
		dma_unmap_sg(dma_device, data->buf, data->size, DMA_FROM_DEVICE);
	}

	if (iser_ctask->dir[ISER_DIR_OUT]) {
		data = &iser_ctask->data[ISER_DIR_OUT];
		dma_unmap_sg(dma_device, data->buf, data->size, DMA_TO_DEVICE);
	}
}
/**
 * iser_reg_rdma_mem - Registers memory intended for RDMA,
 * obtaining rkey and va
 *
 * returns 0 on success, errno code on failure
 */
int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
		      enum iser_data_dir cmd_dir)
{
	struct iser_conn     *ib_conn = iser_ctask->iser_conn->ib_conn;
	struct iser_device   *device  = ib_conn->device;
	struct iser_data_buf *mem     = &iser_ctask->data[cmd_dir];
	struct iser_regd_buf *regd_buf;
	int aligned_len;
	int err;
	int i;
	struct scatterlist *sg;

	regd_buf = &iser_ctask->rdma_regd[cmd_dir];

	aligned_len = iser_data_buf_aligned_len(mem);
	if (aligned_len != mem->dma_nents) {
		iser_err("rdma alignment violation %d/%d aligned\n",
			 aligned_len, mem->size);
		iser_data_buf_dump(mem);

		/* unmap the command data before accessing it */
		iser_dma_unmap_task_data(iser_ctask);

		/* allocate copy buf, if we are writing, copy the */
		/* unaligned scatterlist, dma map the copy        */
		if (iser_start_rdma_unaligned_sg(iser_ctask, cmd_dir) != 0)
			return -ENOMEM;
		mem = &iser_ctask->data_copy[cmd_dir];
	}

	/* if there is a single dma entry, FMR is not needed */
	if (mem->dma_nents == 1) {
		sg = (struct scatterlist *)mem->buf;

		regd_buf->reg.lkey   = device->mr->lkey;
		regd_buf->reg.rkey   = device->mr->rkey;
		regd_buf->reg.len    = sg_dma_len(&sg[0]);
		regd_buf->reg.va     = sg_dma_address(&sg[0]);
		regd_buf->reg.is_fmr = 0;

		iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X "
			 "va: 0x%08lX sz: %ld\n",
			 (unsigned int)regd_buf->reg.lkey,
			 (unsigned int)regd_buf->reg.rkey,
			 (unsigned long)regd_buf->reg.va,
			 (unsigned long)regd_buf->reg.len);
	} else { /* use FMR for multiple dma entries */
		iser_page_vec_build(mem, ib_conn->page_vec);
		err = iser_reg_page_vec(ib_conn, ib_conn->page_vec,
					&regd_buf->reg);
		if (err) {
			iser_data_buf_dump(mem);
			iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
				 mem->dma_nents,
				 ntoh24(iser_ctask->desc.iscsi_header.dlength));
			iser_err("page_vec: data_size = 0x%x, length = %d, "
				 "offset = 0x%x\n",
				 ib_conn->page_vec->data_size,
				 ib_conn->page_vec->length,
				 ib_conn->page_vec->offset);
			for (i = 0; i < ib_conn->page_vec->length; i++)
				iser_err("page_vec[%d] = 0x%llx\n", i,
					 (unsigned long long)
					 ib_conn->page_vec->pages[i]);
			return err;
		}
	}

	/* take a reference on this regd buf such that it will not be released *
	 * (eg in send dto completion) before we get the scsi response         */
	atomic_inc(&regd_buf->ref_count);
	return 0;
}
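
/*
 * Illustrative summary (not in the original source) of the registration
 * flow implemented above:
 *
 *	iser_reg_rdma_mem(ctask, dir)
 *	    aligned_len != dma_nents -> unmap, copy to a bounce buffer,
 *	                                remap as a single SG entry
 *	    dma_nents == 1           -> reuse the global DMA MR
 *	                                (lkey/rkey from device->mr)
 *	    dma_nents  > 1           -> build a page_vec and register an
 *	                                FMR via iser_reg_page_vec()
 *
 * in all cases regd_buf->reg ends up holding the lkey/rkey, va and len
 * that the RDMA operation will use, with one extra reference held until
 * the SCSI response arrives.
 */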