/*
 * drivers/staging/android/ion/ion_carveout_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

#define ION_CARVEOUT_ALLOCATE_FAIL -1

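/* A carveout heap: a fixed physical memory region managed by a gen_pool. */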
struct ion_carveout_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	ion_phys_addr_t base;
};

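/*
 * Allocate @size bytes of physically contiguous memory from the heap's
 * gen_pool.  Returns the physical address on success, or
 * ION_CARVEOUT_ALLOCATE_FAIL if the pool is exhausted.  The @align
 * argument is not used here; the pool's minimum allocation order
 * (PAGE_SHIFT) already guarantees page alignment.
 */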
ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
				      unsigned long size,
				      unsigned long align)
{
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);
	unsigned long offset = gen_pool_alloc(carveout_heap->pool, size);

	if (!offset)
		return ION_CARVEOUT_ALLOCATE_FAIL;

	return offset;
}

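/*
 * Give a chunk obtained from ion_carveout_allocate() back to the pool.
 * The sentinel returned by a failed allocation is silently ignored.
 */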
void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
		       unsigned long size)
{
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);

	if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
		return;
	gen_pool_free(carveout_heap->pool, addr, size);
}

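/*
 * ion_heap_ops.allocate: back @buffer with one contiguous chunk from the
 * carveout, described by a single-entry scatterlist.  Alignments larger
 * than a page are rejected, since the pool only guarantees page alignment.
 */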
static int ion_carveout_heap_allocate(struct ion_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long size, unsigned long align,
				      unsigned long flags)
{
	struct sg_table *table;
	ion_phys_addr_t paddr;
	int ret;

	if (align > PAGE_SIZE)
		return -EINVAL;

	table = kmalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;
	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret)
		goto err_free;

	paddr = ion_carveout_allocate(heap, size, align);
	if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) {
		ret = -ENOMEM;
		goto err_free_table;
	}

	sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
	buffer->sg_table = table;

	return 0;

err_free_table:
	sg_free_table(table);
err_free:
	kfree(table);
	return ret;
}

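/*
 * ion_heap_ops.free: zero the buffer, write any cached lines back to
 * memory so the next owner sees clean RAM, then return the chunk to the
 * pool and release the scatterlist.
 */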
static void ion_carveout_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct sg_table *table = buffer->sg_table;
	struct page *page = sg_page(table->sgl);
	ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));

	ion_heap_buffer_zero(buffer);

	if (ion_buffer_cached(buffer))
		dma_sync_sg_for_device(NULL, table->sgl, table->nents,
				       DMA_BIDIRECTIONAL);

	ion_carveout_free(heap, paddr, buffer->size);
	sg_free_table(table);
	kfree(table);
}

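/*
 * Only allocation and free are carveout-specific; kernel and user
 * mappings are delegated to the generic ion_heap helpers.
 */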
static struct ion_heap_ops carveout_heap_ops = {
	.allocate = ion_carveout_heap_allocate,
	.free = ion_carveout_heap_free,
	.map_user = ion_heap_map_user,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
};

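/*
 * Create a carveout heap over the physical region described by @heap_data.
 * The region is synced for device access and zeroed before being added to
 * the heap's gen_pool.
 */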
struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_carveout_heap *carveout_heap;
	int ret;

	struct page *page;
	size_t size;

	page = pfn_to_page(PFN_DOWN(heap_data->base));
	size = heap_data->size;

	ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);

	ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
	if (ret)
		return ERR_PTR(ret);

	carveout_heap = kzalloc(sizeof(*carveout_heap), GFP_KERNEL);
	if (!carveout_heap)
		return ERR_PTR(-ENOMEM);

	carveout_heap->pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!carveout_heap->pool) {
		kfree(carveout_heap);
		return ERR_PTR(-ENOMEM);
	}
	carveout_heap->base = heap_data->base;
	gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size,
		     -1);
	carveout_heap->heap.ops = &carveout_heap_ops;
	carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;
	carveout_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;

	return &carveout_heap->heap;
}

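/* Tear down a heap made by ion_carveout_heap_create(). */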
void ion_carveout_heap_destroy(struct ion_heap *heap)
{
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);

	gen_pool_destroy(carveout_heap->pool);
	kfree(carveout_heap);
	carveout_heap = NULL;
}