UAPI: (Scripted) Convert #include "..." to #include <path/...> in drivers/gpu/
deliverable/linux.git: drivers/gpu/drm/via/via_mm.c
/*
 * Copyright 2006 Tungsten Graphics Inc., Bismarck, ND., USA.
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT
 * OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
 * THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/via_drm.h>
#include "via_drv.h"

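/* Memory manager bookkeeping is done in 16-byte units (1 << 4). */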
#define VIA_MM_ALIGN_SHIFT 4
#define VIA_MM_ALIGN_MASK ((1 << VIA_MM_ALIGN_SHIFT) - 1)

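/*
 * One allocation handed out to userspace: a node in the DRM range manager
 * plus a link on the owning file's object list so it can be reclaimed.
 */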
struct via_memblock {
	struct drm_mm_node mm_node;
	struct list_head owner_list;
};

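/* Set up the AGP range manager from the size and offset supplied by userspace. */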
int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_agp_t *agp = data;
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	drm_mm_init(&dev_priv->agp_mm, 0, agp->size >> VIA_MM_ALIGN_SHIFT);

	dev_priv->agp_initialized = 1;
	dev_priv->agp_offset = agp->offset;
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size);
	return 0;
}

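/* Set up the VRAM (framebuffer) range manager, mirroring via_agp_init(). */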
int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_fb_t *fb = data;
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	drm_mm_init(&dev_priv->vram_mm, 0, fb->size >> VIA_MM_ALIGN_SHIFT);

	dev_priv->vram_initialized = 1;
	dev_priv->vram_offset = fb->offset;

	mutex_unlock(&dev->struct_mutex);
	DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size);

	return 0;
}

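/*
 * Called when a context is destroyed; on the last context, release the
 * futexes, the interrupt handler, and the register/framebuffer mappings.
 */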
int via_final_context(struct drm_device *dev, int context)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;

	via_release_futex(dev_priv, context);

	/* Linux specific until context tracking code gets ported to BSD */
	/* Last context, perform cleanup */
	if (dev->ctx_count == 1 && dev->dev_private) {
		DRM_DEBUG("Last Context\n");
		drm_irq_uninstall(dev);
		via_cleanup_futex(dev_priv);
		via_do_cleanup_map(dev);
	}
	return 1;
}

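/* Tear down both range managers when the last file handle is closed. */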
void via_lastclose(struct drm_device *dev)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	mutex_lock(&dev->struct_mutex);
	if (dev_priv->vram_initialized) {
		drm_mm_takedown(&dev_priv->vram_mm);
		dev_priv->vram_initialized = 0;
	}
	if (dev_priv->agp_initialized) {
		drm_mm_takedown(&dev_priv->agp_mm);
		dev_priv->agp_initialized = 0;
	}
	mutex_unlock(&dev->struct_mutex);
}

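/*
 * Allocate a block of VRAM or AGP memory for userspace: carve a node out of
 * the relevant range manager, publish it through the object IDR, and return
 * the resulting offset and handle in the ioctl argument.
 */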
int via_mem_alloc(struct drm_device *dev, void *data,
		  struct drm_file *file)
{
	drm_via_mem_t *mem = data;
	int retval = 0, user_key;
	struct via_memblock *item;
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	struct via_file_private *file_priv = file->driver_priv;
	unsigned long tmpSize;

	if (mem->type > VIA_MEM_AGP) {
		DRM_ERROR("Unknown memory type allocation\n");
		return -EINVAL;
	}
	mutex_lock(&dev->struct_mutex);
	if (0 == ((mem->type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized :
		  dev_priv->agp_initialized)) {
		DRM_ERROR("Attempt to allocate from uninitialized memory manager.\n");
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	item = kzalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		retval = -ENOMEM;
		goto fail_alloc;
	}

	tmpSize = (mem->size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT;
	if (mem->type == VIA_MEM_AGP)
		retval = drm_mm_insert_node(&dev_priv->agp_mm,
					    &item->mm_node,
					    tmpSize, 0);
	else
		retval = drm_mm_insert_node(&dev_priv->vram_mm,
					    &item->mm_node,
					    tmpSize, 0);
	if (retval)
		goto fail_alloc;

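	/*
	 * Legacy two-step IDR allocation: preload, then request an id of 1 or
	 * above; if another caller raced us for the preloaded memory, retry.
	 */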
again:
	if (idr_pre_get(&dev_priv->object_idr, GFP_KERNEL) == 0) {
		retval = -ENOMEM;
		goto fail_idr;
	}

	retval = idr_get_new_above(&dev_priv->object_idr, item, 1, &user_key);
	if (retval == -EAGAIN)
		goto again;
	if (retval)
		goto fail_idr;

	list_add(&item->owner_list, &file_priv->obj_list);
	mutex_unlock(&dev->struct_mutex);

	mem->offset = ((mem->type == VIA_MEM_VIDEO) ?
		       dev_priv->vram_offset : dev_priv->agp_offset) +
		((item->mm_node.start) << VIA_MM_ALIGN_SHIFT);
	mem->index = user_key;

	return 0;

fail_idr:
	drm_mm_remove_node(&item->mm_node);
fail_alloc:
	kfree(item);
	mutex_unlock(&dev->struct_mutex);

	mem->offset = 0;
	mem->size = 0;
	mem->index = 0;
	DRM_DEBUG("Video memory allocation failed\n");

	return retval;
}

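/*
 * Release a previously allocated block: drop it from the IDR, the owner
 * list, and the range manager.
 */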
int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_private_t *dev_priv = dev->dev_private;
	drm_via_mem_t *mem = data;
	struct via_memblock *obj;

	mutex_lock(&dev->struct_mutex);
	obj = idr_find(&dev_priv->object_idr, mem->index);
	if (obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	idr_remove(&dev_priv->object_idr, mem->index);
	list_del(&obj->owner_list);
	drm_mm_remove_node(&obj->mm_node);
	kfree(obj);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG("free = 0x%lx\n", mem->index);

	return 0;
}

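/*
 * On file close, wait for the engine to go idle and free every block still
 * owned by this file. The idlelock is held so no other client can submit
 * commands while the blocks are torn down.
 */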
void via_reclaim_buffers_locked(struct drm_device *dev,
				struct drm_file *file)
{
	struct via_file_private *file_priv = file->driver_priv;
	struct via_memblock *entry, *next;

	if (!(file->minor->master && file->master->lock.hw_lock))
		return;

	drm_idlelock_take(&file->master->lock);

	mutex_lock(&dev->struct_mutex);
	if (list_empty(&file_priv->obj_list)) {
		mutex_unlock(&dev->struct_mutex);
		drm_idlelock_release(&file->master->lock);

		return;
	}

	via_driver_dma_quiescent(dev);

	list_for_each_entry_safe(entry, next, &file_priv->obj_list,
				 owner_list) {
		list_del(&entry->owner_list);
		drm_mm_remove_node(&entry->mm_node);
		kfree(entry);
	}
	mutex_unlock(&dev->struct_mutex);

	drm_idlelock_release(&file->master->lock);
}