/*
 *  arch/arm/common/dmabounce.c
 *
 *  Special dma_{map/unmap/dma_sync}_* routines for systems that have
 *  limited DMA windows. These functions utilize bounce buffers to
 *  copy data to/from buffers located outside the DMA region. This
 *  only works for systems in which DMA memory is at the bottom of
 *  RAM, the remainder of memory is at the top, and the DMA memory
 *  can be marked as ZONE_DMA. Anything beyond that, such as discontiguous
 *  DMA windows, will require custom implementations that reserve memory
 *  areas at early bootup.
 *
 *  Original version by Brad Parker (brad@heeltoe.com)
 *  Re-written by Christopher Hoover <ch@murgatroid.com>
 *  Made generic by Deepak Saxena <dsaxena@plexity.net>
 *
 *  Copyright (C) 2002 Hewlett Packard Company.
 *  Copyright (C) 2004 MontaVista Software, Inc.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  version 2 as published by the Free Software Foundation.
 */
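
/*
 * A minimal usage sketch (the pool sizes and error handling below are
 * illustrative assumptions, not taken from a real platform): a machine
 * whose companion chip can only address low memory would register each
 * affected device once it is known to the driver model,
 *
 *	if (dmabounce_register_dev(dev, 512, 4096))
 *		dev_err(dev, "failed to register with dmabounce\n");
 *
 * after which the dma_map_single()/dma_unmap_single() and scatterlist
 * wrappers defined in this file transparently substitute bounce buffers
 * whenever the device's DMA mask, or the platform's dma_needs_bounce()
 * hook, says the memory lies outside the DMA window.
 */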

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>

#include <asm/cacheflush.h>

#undef STATS

#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif

/* ************************************************** */

struct safe_buffer {
        struct list_head node;

        /* original request */
        void            *ptr;
        size_t          size;
        int             direction;

        /* safe buffer info */
        struct dmabounce_pool *pool;
        void            *safe;
        dma_addr_t      safe_dma_addr;
};

struct dmabounce_pool {
        unsigned long   size;
        struct dma_pool *pool;
#ifdef STATS
        unsigned long   allocs;
#endif
};

struct dmabounce_device_info {
        struct device *dev;
        struct list_head safe_buffers;
#ifdef STATS
        unsigned long total_allocs;
        unsigned long map_op_count;
        unsigned long bounce_count;
        int attr_res;
#endif
        struct dmabounce_pool   small;
        struct dmabounce_pool   large;

        rwlock_t lock;
};

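/*
 * Each device registered with dmabounce gets one dmabounce_device_info
 * hung off dev->archdata.dmabounce: two dma_pool-backed pools for the
 * common sizes, plus the safe_buffers list (guarded by the rwlock)
 * tracking every bounce currently outstanding against the device.
 */
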
#ifdef STATS
static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
                              char *buf)
{
        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
        return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
                device_info->small.allocs,
                device_info->large.allocs,
                device_info->total_allocs - device_info->small.allocs -
                        device_info->large.allocs,
                device_info->total_allocs,
                device_info->map_op_count,
                device_info->bounce_count);
}

static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
#endif

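/*
 * With STATS enabled, reading the (root-only, mode 0400) dmabounce_stats
 * attribute of a registered device reports six counters on one line:
 * small-pool allocations, large-pool allocations, fallback coherent
 * allocations, total allocations, map operations, and bounces.
 */
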

/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
                  size_t size, enum dma_data_direction dir)
{
        struct safe_buffer *buf;
        struct dmabounce_pool *pool;
        struct device *dev = device_info->dev;
        unsigned long flags;

        dev_dbg(dev, "%s(ptr=%p, size=%zu, dir=%d)\n",
                __func__, ptr, size, dir);

        if (size <= device_info->small.size) {
                pool = &device_info->small;
        } else if (size <= device_info->large.size) {
                pool = &device_info->large;
        } else {
                pool = NULL;
        }

        buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
        if (buf == NULL) {
                dev_warn(dev, "%s: kmalloc failed\n", __func__);
                return NULL;
        }

        buf->ptr = ptr;
        buf->size = size;
        buf->direction = dir;
        buf->pool = pool;

        if (pool) {
                buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
                                           &buf->safe_dma_addr);
        } else {
                buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
                                               GFP_ATOMIC);
        }

        if (buf->safe == NULL) {
                dev_warn(dev,
                         "%s: could not alloc dma memory (size=%zu)\n",
                         __func__, size);
                kfree(buf);
                return NULL;
        }

#ifdef STATS
        if (pool)
                pool->allocs++;
        device_info->total_allocs++;
#endif

        write_lock_irqsave(&device_info->lock, flags);

        list_add(&buf->node, &device_info->safe_buffers);

        write_unlock_irqrestore(&device_info->lock, flags);

        return buf;
}
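
/*
 * Sizing example (assuming the pool sizes suggested in the sketch at the
 * top of the file, 512 and 4096): a 100-byte mapping is carved from the
 * small dma_pool, a 2KB mapping from the large one, and a 64KB mapping
 * falls back to a one-off dma_alloc_coherent() call, which is why
 * oversized requests are the costliest ones to bounce.
 */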

/* determine if a buffer is from our "safe" pool */
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
        struct safe_buffer *b, *rb = NULL;
        unsigned long flags;

        read_lock_irqsave(&device_info->lock, flags);

        list_for_each_entry(b, &device_info->safe_buffers, node)
                if (b->safe_dma_addr == safe_dma_addr) {
                        rb = b;
                        break;
                }

        read_unlock_irqrestore(&device_info->lock, flags);
        return rb;
}

static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
        unsigned long flags;

        dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

        write_lock_irqsave(&device_info->lock, flags);

        list_del(&buf->node);

        write_unlock_irqrestore(&device_info->lock, flags);

        if (buf->pool)
                dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
        else
                dma_free_coherent(device_info->dev, buf->size, buf->safe,
                                  buf->safe_dma_addr);

        kfree(buf);
}

/* ************************************************** */

static inline dma_addr_t
map_single(struct device *dev, void *ptr, size_t size,
           enum dma_data_direction dir)
{
        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
        dma_addr_t dma_addr;
        int needs_bounce = 0;

        if (device_info)
                DO_STATS(device_info->map_op_count++);

        dma_addr = virt_to_dma(dev, ptr);

        if (dev->dma_mask) {
                unsigned long mask = *dev->dma_mask;
                unsigned long limit;

                limit = (mask + 1) & ~mask;
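                /*
                 * Worked example: with a contiguous low-bit mask such as
                 * 0x00ffffff (24-bit DMA), (mask + 1) & ~mask yields
                 * 0x01000000, the largest span a single mapping may cover.
                 * With a full 32-bit mask the addition wraps to zero, so
                 * the size check below is skipped entirely.
                 */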
                if (limit && size > limit) {
                        dev_err(dev, "DMA mapping too big (requested %#zx "
                                "mask %#Lx)\n", size, *dev->dma_mask);
                        return ~0;
                }

                /*
                 * Figure out if we need to bounce from the DMA mask.
                 */
                needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
        }

        if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
                struct safe_buffer *buf;

                buf = alloc_safe_buffer(device_info, ptr, size, dir);
                if (buf == NULL) {
                        dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
                                __func__, ptr);
                        return ~0;
                }

                dev_dbg(dev,
                        "%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
                        __func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
                        buf->safe, (void *) buf->safe_dma_addr);

                if ((dir == DMA_TO_DEVICE) ||
                    (dir == DMA_BIDIRECTIONAL)) {
                        dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %zu\n",
                                __func__, ptr, buf->safe, size);
                        memcpy(buf->safe, ptr, size);
                }
                ptr = buf->safe;

                dma_addr = buf->safe_dma_addr;
        } else {
                /*
                 * The buffer is not being bounced, so the CPU cache
                 * must be synced directly; safe buffers never need this,
                 * as they come from the coherent allocators.
                 */
                consistent_sync(ptr, size, dir);
        }

        return dma_addr;
}

static inline void
unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
             enum dma_data_direction dir)
{
        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
        struct safe_buffer *buf = NULL;

        /*
         * Trying to unmap an invalid mapping
         */
        if (dma_mapping_error(dma_addr)) {
                dev_err(dev, "Trying to unmap invalid mapping\n");
                return;
        }

        if (device_info)
                buf = find_safe_buffer(device_info, dma_addr);

        if (buf) {
                BUG_ON(buf->size != size);

                dev_dbg(dev,
                        "%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
                        __func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
                        buf->safe, (void *) buf->safe_dma_addr);

                DO_STATS(device_info->bounce_count++);

                if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
                        void *ptr = buf->ptr;

                        dev_dbg(dev,
                                "%s: copy back safe %p to unsafe %p size %zu\n",
                                __func__, buf->safe, ptr, size);
                        memcpy(ptr, buf->safe, size);

                        /*
                         * DMA buffers must have the same cache properties
                         * as if they were really used for DMA - which means
                         * data must be written back to RAM. Note that
                         * we don't use dmac_flush_range() here for the
                         * bidirectional case because we know the cache
                         * lines will be coherent with the data written.
                         */
                        dmac_clean_range(ptr, ptr + size);
                        outer_clean_range(__pa(ptr), __pa(ptr) + size);
                }
                free_safe_buffer(device_info, buf);
        }
}

static inline void
sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
            enum dma_data_direction dir)
{
        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
        struct safe_buffer *buf = NULL;

        if (device_info)
                buf = find_safe_buffer(device_info, dma_addr);

        if (buf) {
                /*
                 * Both of these checks from the original code need to be
                 * commented out because some drivers rely on the following:
                 *
                 * 1) Drivers may map a large chunk of memory into DMA space
                 *    but only sync a small portion of it. A good example is
                 *    allocating a large buffer, mapping it, and then
                 *    breaking it up into small descriptors. There is no
                 *    point in syncing the whole buffer if you only have to
                 *    touch one descriptor.
                 *
                 * 2) Buffers that are mapped as DMA_BIDIRECTIONAL are
                 *    usually only synced in one direction at a time.
                 *
                 * See drivers/net/eepro100.c for examples of both cases.
                 *
                 * -ds
                 *
                 * BUG_ON(buf->size != size);
                 * BUG_ON(buf->direction != dir);
                 */

                dev_dbg(dev,
                        "%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
                        __func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
                        buf->safe, (void *) buf->safe_dma_addr);

                DO_STATS(device_info->bounce_count++);

                switch (dir) {
                case DMA_FROM_DEVICE:
                        dev_dbg(dev,
                                "%s: copy back safe %p to unsafe %p size %zu\n",
                                __func__, buf->safe, buf->ptr, size);
                        memcpy(buf->ptr, buf->safe, size);
                        break;
                case DMA_TO_DEVICE:
                        dev_dbg(dev,
                                "%s: copy out unsafe %p to safe %p, size %zu\n",
                                __func__, buf->ptr, buf->safe, size);
                        memcpy(buf->safe, buf->ptr, size);
                        break;
                case DMA_BIDIRECTIONAL:
                        BUG();  /* is this allowed?  what does it mean? */
                default:
                        BUG();
                }
                /*
                 * No need to sync the safe buffer - it was allocated
                 * via the coherent allocators.
                 */
        } else {
                consistent_sync(dma_to_virt(dev, dma_addr), size, dir);
        }
}

/* ************************************************** */

/*
 * See if a buffer address is in an 'unsafe' range.  If it is,
 * allocate a 'safe' buffer and copy the unsafe buffer into it.
 * Substitute the safe buffer for the unsafe one (basically, move
 * the buffer from an unsafe area to a safe one).
 */
dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
               enum dma_data_direction dir)
{
        dma_addr_t dma_addr;

        dev_dbg(dev, "%s(ptr=%p,size=%zu,dir=%x)\n",
                __func__, ptr, size, dir);

        BUG_ON(dir == DMA_NONE);

        dma_addr = map_single(dev, ptr, size, dir);

        return dma_addr;
}

/*
 * See if a mapped address was really a "safe" buffer and, if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer (basically, return things back to the way they
 * should be).
 */
void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction dir)
{
        dev_dbg(dev, "%s(ptr=%p,size=%zu,dir=%x)\n",
                __func__, (void *) dma_addr, size, dir);

        BUG_ON(dir == DMA_NONE);

        unmap_single(dev, dma_addr, size, dir);
}

int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
           enum dma_data_direction dir)
{
        int i;

        dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
                __func__, sg, nents, dir);

        BUG_ON(dir == DMA_NONE);

        for (i = 0; i < nents; i++, sg++) {
                struct page *page = sg->page;
                unsigned int offset = sg->offset;
                unsigned int length = sg->length;
                void *ptr = page_address(page) + offset;

                sg->dma_address =
                        map_single(dev, ptr, length, dir);
        }

        return nents;
}

void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
             enum dma_data_direction dir)
{
        int i;

        dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
                __func__, sg, nents, dir);

        BUG_ON(dir == DMA_NONE);

        for (i = 0; i < nents; i++, sg++) {
                dma_addr_t dma_addr = sg->dma_address;
                unsigned int length = sg->length;

                unmap_single(dev, dma_addr, length, dir);
        }
}
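
/*
 * Driver-side view (a sketch; 'sglist' and 'nr' are hypothetical names):
 * because the functions above keep the standard DMA API contract,
 * drivers need no dmabounce-specific code,
 *
 *	count = dma_map_sg(dev, sglist, nr, DMA_TO_DEVICE);
 *	// ...program the device using sg_dma_address()/sg_dma_len()...
 *	dma_unmap_sg(dev, sglist, count, DMA_TO_DEVICE);
 *
 * with any bouncing handled per-entry inside map_single()/unmap_single().
 */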

void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr, size_t size,
                        enum dma_data_direction dir)
{
        dev_dbg(dev, "%s(ptr=%p,size=%zu,dir=%x)\n",
                __func__, (void *) dma_addr, size, dir);

        sync_single(dev, dma_addr, size, dir);
}

void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_addr, size_t size,
                           enum dma_data_direction dir)
{
        dev_dbg(dev, "%s(ptr=%p,size=%zu,dir=%x)\n",
                __func__, (void *) dma_addr, size, dir);

        sync_single(dev, dma_addr, size, dir);
}

void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
                    enum dma_data_direction dir)
{
        int i;

        dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
                __func__, sg, nents, dir);

        BUG_ON(dir == DMA_NONE);

        for (i = 0; i < nents; i++, sg++) {
                dma_addr_t dma_addr = sg->dma_address;
                unsigned int length = sg->length;

                sync_single(dev, dma_addr, length, dir);
        }
}

void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
                       enum dma_data_direction dir)
{
        int i;

        dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
                __func__, sg, nents, dir);

        BUG_ON(dir == DMA_NONE);

        for (i = 0; i < nents; i++, sg++) {
                dma_addr_t dma_addr = sg->dma_address;
                unsigned int length = sg->length;

                sync_single(dev, dma_addr, length, dir);
        }
}

static int
dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev, const char *name,
                    unsigned long size)
{
        pool->size = size;
        DO_STATS(pool->allocs = 0);
        pool->pool = dma_pool_create(name, dev, size,
                                     0 /* byte alignment */,
                                     0 /* no page-crossing issues */);

        return pool->pool ? 0 : -ENOMEM;
}
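
/*
 * Note on the arguments (my reading of the inline comments above): the
 * 0 alignment asks dma_pool_create() for plain byte alignment, and the
 * 0 boundary argument means pool blocks may cross page boundaries -
 * bounce buffers carry no such restriction of their own.
 */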

int
dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
                       unsigned long large_buffer_size)
{
        struct dmabounce_device_info *device_info;
        int ret;

        device_info = kzalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
        if (!device_info) {
                printk(KERN_ERR
                        "Could not allocate dmabounce_device_info for %s\n",
                        dev->bus_id);
                return -ENOMEM;
        }

        ret = dmabounce_init_pool(&device_info->small, dev,
                                  "small_dmabounce_pool", small_buffer_size);
        if (ret) {
                dev_err(dev,
                        "dmabounce: could not allocate DMA pool for %ld byte objects\n",
                        small_buffer_size);
                goto err_free;
        }

        if (large_buffer_size) {
                ret = dmabounce_init_pool(&device_info->large, dev,
                                          "large_dmabounce_pool",
                                          large_buffer_size);
                if (ret) {
                        dev_err(dev,
                                "dmabounce: could not allocate DMA pool for %ld byte objects\n",
                                large_buffer_size);
                        goto err_destroy;
                }
        }

        device_info->dev = dev;
        INIT_LIST_HEAD(&device_info->safe_buffers);
        rwlock_init(&device_info->lock);

#ifdef STATS
        device_info->total_allocs = 0;
        device_info->map_op_count = 0;
        device_info->bounce_count = 0;
        device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
#endif

        dev->archdata.dmabounce = device_info;

        printk(KERN_INFO "dmabounce: registered device %s on %s bus\n",
                dev->bus_id, dev->bus->name);

        return 0;

err_destroy:
        dma_pool_destroy(device_info->small.pool);
err_free:
        kfree(device_info);
        return ret;
}

void
dmabounce_unregister_dev(struct device *dev)
{
        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;

        dev->archdata.dmabounce = NULL;

        if (!device_info) {
                printk(KERN_WARNING
                        "%s: Never registered with dmabounce but attempting "
                        "to unregister!\n", dev->bus_id);
                return;
        }

        if (!list_empty(&device_info->safe_buffers)) {
                printk(KERN_ERR
                        "%s: Removing from dmabounce with pending buffers!\n",
                        dev->bus_id);
                BUG();
        }

        if (device_info->small.pool)
                dma_pool_destroy(device_info->small.pool);
        if (device_info->large.pool)
                dma_pool_destroy(device_info->large.pool);

#ifdef STATS
        if (device_info->attr_res == 0)
                device_remove_file(dev, &dev_attr_dmabounce_stats);
#endif

        kfree(device_info);

        printk(KERN_INFO "dmabounce: device %s on %s bus unregistered\n",
                dev->bus_id, dev->bus->name);
}


EXPORT_SYMBOL(dma_map_single);
EXPORT_SYMBOL(dma_unmap_single);
EXPORT_SYMBOL(dma_map_sg);
EXPORT_SYMBOL(dma_unmap_sg);
EXPORT_SYMBOL(dma_sync_single_for_cpu);
EXPORT_SYMBOL(dma_sync_single_for_device);
EXPORT_SYMBOL(dma_sync_sg_for_cpu);
EXPORT_SYMBOL(dma_sync_sg_for_device);
EXPORT_SYMBOL(dmabounce_register_dev);
EXPORT_SYMBOL(dmabounce_unregister_dev);

MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");