/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 *  EMU10K1 memory page allocation (PTB area)
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/export.h>

#include <sound/core.h>
#include <sound/emu10k1.h>
/* The "page" arguments of these two macros are Emu pages (4096 bytes),
 * not the aligned (kernel) pages used elsewhere in this file.
 */
#define __set_ptb_entry(emu,page,addr) \
	(((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << 1) | (page)))
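
/* Worked example (illustrative, not part of the original source): a DMA
 * page address is 4k-aligned, so its low 12 bits are zero; shifting it
 * left by one leaves room for the page index in the low bits.  Mapping
 * Emu page 5 to address 0x12340000 therefore stores
 * (0x12340000 << 1) | 5 == 0x24680005 (little-endian) in the PTB.
 */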

#define UNIT_PAGES		(PAGE_SIZE / EMUPAGESIZE)
#define MAX_ALIGN_PAGES		(MAXPAGES / UNIT_PAGES)
/* get aligned page from offset address */
#define get_aligned_page(offset)	((offset) >> PAGE_SHIFT)
/* get offset address from aligned page */
#define aligned_page_offset(page)	((page) << PAGE_SHIFT)

#if PAGE_SIZE == 4096
/* page size == EMUPAGESIZE */
/* fill the PTB entry (or entries) corresponding to page with addr */
#define set_ptb_entry(emu,page,addr)	__set_ptb_entry(emu,page,addr)
/* fill the PTB entry (or entries) corresponding to page with the silence pointer */
#define set_silent_ptb(emu,page)	__set_ptb_entry(emu,page,emu->silent_page.addr)
#else
/* fill PTB entries -- we need to fill UNIT_PAGES entries */
static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++) {
		__set_ptb_entry(emu, page, addr);
		addr += EMUPAGESIZE;
	}
}
static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++)
		/* do not increment the address; every entry points at silence */
		__set_ptb_entry(emu, page, emu->silent_page.addr);
}
#endif /* PAGE_SIZE */
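
/* Worked example (illustrative): with 16k kernel pages, UNIT_PAGES is 4,
 * so set_ptb_entry(emu, 3, addr) fills PTB entries 12..15 with addr,
 * addr + 4k, addr + 8k and addr + 12k -- one aligned page always covers
 * UNIT_PAGES consecutive Emu pages.
 */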


/*
 */
static int synth_alloc_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);
static int synth_free_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);

#define get_emu10k1_memblk(l,member)	list_entry(l, struct snd_emu10k1_memblk, member)


/* initialize emu10k1 part */
static void emu10k1_memblk_init(struct snd_emu10k1_memblk *blk)
{
	blk->mapped_page = -1;
	INIT_LIST_HEAD(&blk->mapped_link);
	INIT_LIST_HEAD(&blk->mapped_order_link);
	blk->map_locked = 0;

	blk->first_page = get_aligned_page(blk->mem.offset);
	blk->last_page = get_aligned_page(blk->mem.offset + blk->mem.size - 1);
	blk->pages = blk->last_page - blk->first_page + 1;
}

/*
 * search for an empty region on the PTB with the given size
 *
 * if an empty region is found, return the start page and store the next
 * mapped block in nextp
 * if not found, return a negative error code
 */
static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp)
{
	int page = 0, found_page = -ENOMEM;
	int max_size = npages;
	int size;
	struct list_head *candidate = &emu->mapped_link_head;
	struct list_head *pos;

	list_for_each (pos, &emu->mapped_link_head) {
		struct snd_emu10k1_memblk *blk = get_emu10k1_memblk(pos, mapped_link);
		if (blk->mapped_page < 0)
			continue;
		size = blk->mapped_page - page;
		if (size == npages) {
			*nextp = pos;
			return page;
		}
		else if (size > max_size) {
			/* we look for the maximum empty hole */
			max_size = size;
			candidate = pos;
			found_page = page;
		}
		page = blk->mapped_page + blk->pages;
	}
	size = MAX_ALIGN_PAGES - page;
	if (size >= max_size) {
		*nextp = pos;
		return page;
	}
	*nextp = candidate;
	return found_page;
}
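
/* Illustrative walk-through (not from the original source): with mapped
 * blocks leaving a 3-page hole, an 8-page hole and a 5-page tail, a
 * request for 3 pages returns the exact-fit hole immediately; a request
 * for 4 pages returns the 8-page hole (the largest one), since the tail
 * is smaller than it.
 */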

/*
 * map a memory block onto emu10k1's PTB
 *
 * call with memblk_lock held
 */
static int map_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, pg;
	struct list_head *next;

	page = search_empty_map_area(emu, blk->pages, &next);
	if (page < 0) /* not found */
		return page;
	/* insert this block at the proper position in the mapped list */
	list_add_tail(&blk->mapped_link, next);
	/* append this as the newest block to the order list */
	list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
	blk->mapped_page = page;
	/* fill PTB */
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_ptb_entry(emu, page, emu->page_addr_table[pg]);
		page++;
	}
	return 0;
}

/*
 * unmap the block
 * return the size of the resulting empty region (in pages)
 *
 * call with memblk_lock held
 */
static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int start_page, end_page, mpage, pg;
	struct list_head *p;
	struct snd_emu10k1_memblk *q;

	/* calculate the expected size of the empty region */
	if ((p = blk->mapped_link.prev) != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		start_page = q->mapped_page + q->pages;
	} else
		start_page = 0;
	if ((p = blk->mapped_link.next) != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		end_page = q->mapped_page;
	} else
		end_page = MAX_ALIGN_PAGES;

	/* remove links */
	list_del(&blk->mapped_link);
	list_del(&blk->mapped_order_link);
	/* clear PTB */
	mpage = blk->mapped_page;
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_silent_ptb(emu, mpage);
		mpage++;
	}
	blk->mapped_page = -1;
	return end_page - start_page; /* return the new empty size */
}
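
/* Note (illustrative): the returned size covers the whole merged hole,
 * not just the unmapped block.  E.g. if the previous neighbour ends at
 * page 10 and the next one starts at page 20, unmapping a 4-page block
 * in between reports a 10-page empty region.
 */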

/*
 * search for empty pages with the given size, and create a memory block
 *
 * unlike synth_alloc the memory block is aligned to the page start
 */
static struct snd_emu10k1_memblk *
search_empty(struct snd_emu10k1 *emu, int size)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *blk;
	int page, psize;

	psize = get_aligned_page(size + PAGE_SIZE - 1);
	page = 0;
	list_for_each(p, &emu->memhdr->block) {
		blk = get_emu10k1_memblk(p, mem.list);
		if (page + psize <= blk->first_page)
			goto __found_pages;
		page = blk->last_page + 1;
	}
	if (page + psize > emu->max_cache_pages)
		return NULL;

__found_pages:
	/* create a new memory block */
	blk = (struct snd_emu10k1_memblk *)__snd_util_memblk_new(emu->memhdr, psize << PAGE_SHIFT, p->prev);
	if (blk == NULL)
		return NULL;
	blk->mem.offset = aligned_page_offset(page); /* set aligned offset */
	emu10k1_memblk_init(blk);
	return blk;
}


/*
 * check if the given pointer is valid for pages
 */
static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr)
{
	if (addr & ~emu->dma_mask) {
		dev_err(emu->card->dev,
			"max memory size is 0x%lx (addr = 0x%lx)!!\n",
			emu->dma_mask, (unsigned long)addr);
		return 0;
	}
	if (addr & (EMUPAGESIZE - 1)) {
		dev_err(emu->card->dev, "page is not aligned\n");
		return 0;
	}
	return 1;
}

/*
 * map the given memory block onto the PTB.
 * if the block is already mapped, update the link order.
 * if no empty pages are found, try to release unused memory blocks
 * and retry the mapping.
 */
int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int err;
	int size;
	struct list_head *p, *nextp;
	struct snd_emu10k1_memblk *deleted;
	unsigned long flags;

	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0) {
		/* update order link */
		list_move_tail(&blk->mapped_order_link,
			       &emu->mapped_order_link_head);
		spin_unlock_irqrestore(&emu->memblk_lock, flags);
		return 0;
	}
	if ((err = map_memblk(emu, blk)) < 0) {
		/* not enough pages - try to unmap some blocks,
		 * starting from the oldest one
		 */
		p = emu->mapped_order_link_head.next;
		for (; p != &emu->mapped_order_link_head; p = nextp) {
			nextp = p->next;
			deleted = get_emu10k1_memblk(p, mapped_order_link);
			if (deleted->map_locked)
				continue;
			size = unmap_memblk(emu, deleted);
			if (size >= blk->pages) {
				/* OK, the empty region is large enough */
				err = map_memblk(emu, blk);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	return err;
}
293
294 EXPORT_SYMBOL(snd_emu10k1_memblk_map);
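
/* Hypothetical call site (illustrative sketch, not part of the driver):
 * because of the eviction loop above, a failure here means the PTB was
 * still full even after all unlocked blocks had been unmapped.
 *
 *	err = snd_emu10k1_memblk_map(emu, blk);
 *	if (err < 0)
 *		return err;
 */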

/*
 * page allocation for DMA
 */
struct snd_util_memblk *
snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_util_memhdr *hdr;
	struct snd_emu10k1_memblk *blk;
	int page, err, idx;

	if (snd_BUG_ON(!emu))
		return NULL;
	if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
		       runtime->dma_bytes >= MAXPAGES * EMUPAGESIZE))
		return NULL;
	hdr = emu->memhdr;
	if (snd_BUG_ON(!hdr))
		return NULL;

	idx = runtime->period_size >= runtime->buffer_size ?
					(emu->delay_pcm_irq * 2) : 0;
	mutex_lock(&hdr->block_mutex);
	blk = search_empty(emu, runtime->dma_bytes + idx);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	/* fill buffer addresses, but the pointers are not stored so that
	 * snd_free_pci_page() is not called in synth_free()
	 */
	idx = 0;
	for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
		unsigned long ofs = idx << PAGE_SHIFT;
		dma_addr_t addr;
		if (ofs >= runtime->dma_bytes)
			addr = emu->silent_page.addr;
		else
			addr = snd_pcm_sgbuf_get_addr(substream, ofs);
		if (!is_valid_page(emu, addr)) {
			dev_err(emu->card->dev,
				"emu: failure page = %d\n", idx);
			mutex_unlock(&hdr->block_mutex);
			return NULL;
		}
		emu->page_addr_table[page] = addr;
		emu->page_ptr_table[page] = NULL;
	}

	/* set PTB entries */
	blk->map_locked = 1; /* do not unmap this block! */
	err = snd_emu10k1_memblk_map(emu, blk);
	if (err < 0) {
		__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	mutex_unlock(&hdr->block_mutex);
	return (struct snd_util_memblk *)blk;
}


/*
 * release a DMA buffer from the page table
 */
int snd_emu10k1_free_pages(struct snd_emu10k1 *emu, struct snd_util_memblk *blk)
{
	if (snd_BUG_ON(!emu || !blk))
		return -EINVAL;
	return snd_emu10k1_synth_free(emu, blk);
}


/*
 * memory allocation using multiple pages (for synth)
 * Unlike the DMA allocation above, non-contiguous pages are assigned.
 */

/*
 * allocate a synth sample area
 */
struct snd_util_memblk *
snd_emu10k1_synth_alloc(struct snd_emu10k1 *hw, unsigned int size)
{
	struct snd_emu10k1_memblk *blk;
	struct snd_util_memhdr *hdr = hw->memhdr;

	mutex_lock(&hdr->block_mutex);
	blk = (struct snd_emu10k1_memblk *)__snd_util_mem_alloc(hdr, size);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	if (synth_alloc_pages(hw, blk)) {
		__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	snd_emu10k1_memblk_map(hw, blk);
	mutex_unlock(&hdr->block_mutex);
	return (struct snd_util_memblk *)blk;
}
398
399 EXPORT_SYMBOL(snd_emu10k1_synth_alloc);
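
/* Hypothetical usage sketch (illustrative, not part of the driver): the
 * typical lifecycle of a synth sample area, as used e.g. by a wavetable
 * loader.  "emu", "buf" and "len" are assumed to come from the caller.
 *
 *	struct snd_util_memblk *blk;
 *
 *	blk = snd_emu10k1_synth_alloc(emu, len);
 *	if (!blk)
 *		return -ENOMEM;
 *	if (snd_emu10k1_synth_copy_from_user(emu, blk, 0, buf, len)) {
 *		snd_emu10k1_synth_free(emu, blk);
 *		return -EFAULT;
 *	}
 *	...
 *	snd_emu10k1_synth_free(emu, blk);
 */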

/*
 * free a synth sample area
 */
int
snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *memblk)
{
	struct snd_util_memhdr *hdr = emu->memhdr;
	struct snd_emu10k1_memblk *blk = (struct snd_emu10k1_memblk *)memblk;
	unsigned long flags;

	mutex_lock(&hdr->block_mutex);
	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0)
		unmap_memblk(emu, blk);
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	synth_free_pages(emu, blk);
	__snd_util_mem_free(hdr, memblk);
	mutex_unlock(&hdr->block_mutex);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_free);

/* check the new allocation range to exclude pages shared with neighbours */
static void get_single_page_range(struct snd_util_memhdr *hdr,
				  struct snd_emu10k1_memblk *blk,
				  int *first_page_ret, int *last_page_ret)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *q;
	int first_page, last_page;
	first_page = blk->first_page;
	if ((p = blk->mem.list.prev) != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->last_page == first_page)
			first_page++;  /* first page was already allocated */
	}
	last_page = blk->last_page;
	if ((p = blk->mem.list.next) != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->first_page == last_page)
			last_page--; /* last page was already allocated */
	}
	*first_page_ret = first_page;
	*last_page_ret = last_page;
}
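
/* Worked example (illustrative): synth blocks are not page-aligned, so a
 * block spanning aligned pages 4..7 whose predecessor already ends on
 * page 4 and whose successor already starts on page 7 only needs pages
 * 5..6 freshly allocated; the shared boundary pages stay untouched.
 */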

/* release allocated pages */
static void __synth_free_pages(struct snd_emu10k1 *emu, int first_page,
			       int last_page)
{
	int page;

	for (page = first_page; page <= last_page; page++) {
		free_page((unsigned long)emu->page_ptr_table[page]);
		emu->page_addr_table[page] = 0;
		emu->page_ptr_table[page] = NULL;
	}
}

/*
 * allocate kernel pages
 */
static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, first_page, last_page;

	emu10k1_memblk_init(blk);
	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	/* allocate kernel pages */
	for (page = first_page; page <= last_page; page++) {
		/* first try to allocate from <4GB zone */
		struct page *p = alloc_page(GFP_KERNEL | GFP_DMA32 |
					    __GFP_NOWARN);
		if (!p || (page_to_pfn(p) & ~(emu->dma_mask >> PAGE_SHIFT))) {
			if (p)
				__free_page(p);
			/* try to allocate from <16MB zone */
			p = alloc_page(GFP_ATOMIC | GFP_DMA |
				       __GFP_NORETRY | /* no OOM-killer */
				       __GFP_NOWARN);
		}
		if (!p) {
			__synth_free_pages(emu, first_page, page - 1);
			return -ENOMEM;
		}
		emu->page_addr_table[page] = page_to_phys(p);
		emu->page_ptr_table[page] = page_address(p);
	}
	return 0;
}

/*
 * free pages
 */
static int synth_free_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int first_page, last_page;

	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	__synth_free_pages(emu, first_page, last_page);
	return 0;
}

/* calculate the buffer pointer from the offset address */
static inline void *offset_ptr(struct snd_emu10k1 *emu, int page, int offset)
{
	char *ptr;
	if (snd_BUG_ON(page < 0 || page >= emu->max_cache_pages))
		return NULL;
	ptr = emu->page_ptr_table[page];
	if (!ptr) {
		dev_err(emu->card->dev,
			"access to NULL ptr: page = %d\n", page);
		return NULL;
	}
	ptr += offset & (PAGE_SIZE - 1);
	return (void *)ptr;
}

/*
 * bzero(blk + offset, size)
 */
int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
			    int offset, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr)
			memset(ptr, 0, temp);
		offset = nextofs;
		page++;
	} while (offset < end_offset);
	return 0;
}
548
549 EXPORT_SYMBOL(snd_emu10k1_synth_bzero);
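
/* Worked example (illustrative): the loop above handles at most one page
 * per iteration.  With 4k pages, zeroing size 0x300 at in-block offset
 * 0x1f00 touches page 1 for 0x100 bytes (up to the page boundary at
 * 0x2000) and page 2 for the remaining 0x200 bytes.
 */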

/*
 * copy_from_user(blk + offset, data, size)
 */
int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
				     int offset, const char __user *data, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr && copy_from_user(ptr, data, temp))
			return -EFAULT;
		offset = nextofs;
		data += temp;
		page++;
	} while (offset < end_offset);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_copy_from_user);