Commit | Line | Data |
---|---|---|
673a394b EA |
1 | /* |
2 | * Copyright © 2008 Intel Corporation | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice (including the next | |
12 | * paragraph) shall be included in all copies or substantial portions of the | |
13 | * Software. | |
14 | * | |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | |
21 | * IN THE SOFTWARE. | |
22 | * | |
23 | * Authors: | |
24 | * Keith Packard <keithp@keithp.com> | |
25 | * | |
26 | */ | |
27 | ||
28 | #include "drmP.h" | |
29 | #include "drm.h" | |
30 | #include "i915_drm.h" | |
31 | #include "i915_drv.h" | |
32 | ||
23bc5982 CW |
#if WATCH_LISTS
/*
 * i915_verify_lists - debug-only consistency check of the GEM object lists.
 *
 * Walks the render active, flushing, gpu_write, inactive and pinned lists
 * and reports (via DRM_ERROR) any object whose state contradicts the list
 * it sits on, or which appears to have been freed (zero refcount or wrong
 * owning device).
 *
 * Returns the number of inconsistencies found (0 when clean).  The result
 * is latched in @warned so the (expensive, noisy) check runs only until
 * the first failure is reported.
 */
int
i915_verify_lists(struct drm_device *dev)
{
	static int warned;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int err = 0;

	/* Once an error has been reported, stop re-checking to avoid
	 * flooding the log with the same inconsistencies. */
	if (warned)
		return 0;

	/* Objects on the active list must be alive, marked active and
	 * still being read by the GPU; if the GPU also writes them they
	 * must appear on the gpu_write list. */
	list_for_each_entry(obj, &dev_priv->render_ring.active_list, list) {
		if (obj->base.dev != dev ||
		    !atomic_read(&obj->base.refcount.refcount)) {
			DRM_ERROR("freed render active %p\n", obj);
			err++;
			break;
		} else if (!obj->active ||
			   (obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) {
			DRM_ERROR("invalid render active %p (a %d r %x)\n",
				  obj,
				  obj->active,
				  obj->base.read_domains);
			err++;
		} else if (obj->base.write_domain && list_empty(&obj->gpu_write_list)) {
			DRM_ERROR("invalid render active %p (w %x, gwl %d)\n",
				  obj,
				  obj->base.write_domain,
				  !list_empty(&obj->gpu_write_list));
			err++;
		}
	}

	/* Flushing objects are active, GPU-written and queued for flush. */
	list_for_each_entry(obj, &dev_priv->mm.flushing_list, list) {
		if (obj->base.dev != dev ||
		    !atomic_read(&obj->base.refcount.refcount)) {
			DRM_ERROR("freed flushing %p\n", obj);
			err++;
			break;
		} else if (!obj->active ||
			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0 ||
			   list_empty(&obj->gpu_write_list)) {
			DRM_ERROR("invalid flushing %p (a %d w %x gwl %d)\n",
				  obj,
				  obj->active,
				  obj->base.write_domain,
				  !list_empty(&obj->gpu_write_list));
			err++;
		}
	}

	/* Everything on the gpu_write list must be active with a GPU
	 * write domain set. */
	list_for_each_entry(obj, &dev_priv->mm.gpu_write_list, gpu_write_list) {
		if (obj->base.dev != dev ||
		    !atomic_read(&obj->base.refcount.refcount)) {
			DRM_ERROR("freed gpu write %p\n", obj);
			err++;
			break;
		} else if (!obj->active ||
			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) {
			DRM_ERROR("invalid gpu write %p (a %d w %x)\n",
				  obj,
				  obj->active,
				  obj->base.write_domain);
			err++;
		}
	}

	/* Inactive objects must be idle: not pinned, not active, and not
	 * dirty in any GPU domain. */
	list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) {
		if (obj->base.dev != dev ||
		    !atomic_read(&obj->base.refcount.refcount)) {
			DRM_ERROR("freed inactive %p\n", obj);
			err++;
			break;
		} else if (obj->pin_count || obj->active ||
			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
			DRM_ERROR("invalid inactive %p (p %d a %d w %x)\n",
				  obj,
				  obj->pin_count, obj->active,
				  obj->base.write_domain);
			err++;
		}
	}

	/* Pinned objects must hold a pin but otherwise be idle. */
	list_for_each_entry(obj, &dev_priv->mm.pinned_list, list) {
		if (obj->base.dev != dev ||
		    !atomic_read(&obj->base.refcount.refcount)) {
			DRM_ERROR("freed pinned %p\n", obj);
			err++;
			break;
		} else if (!obj->pin_count || obj->active ||
			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
			DRM_ERROR("invalid pinned %p (p %d a %d w %x)\n",
				  obj,
				  obj->pin_count, obj->active,
				  obj->base.write_domain);
			err++;
		}
	}

	/* Latch any failure so subsequent calls return immediately. */
	return warned = err;
}
#endif /* WATCH_LISTS */
136 | ||
137 | ||
#if WATCH_EXEC || WATCH_PWRITE
/*
 * i915_gem_dump_page - dump a sub-range of one backing page to the log.
 * @page: backing page to dump
 * @start: first byte offset within the page (inclusive)
 * @end: last byte offset within the page (exclusive)
 * @bias: GTT offset of the page, added to the printed addresses
 * @mark: GTT address to highlight in the output
 *
 * Prints one 32-bit word per line; the line whose address equals @mark
 * is flagged with a trailing marker.
 */
static void
i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
		   uint32_t bias, uint32_t mark)
{
	uint32_t *mem = kmap_atomic(page, KM_USER0);
	int i;

	for (i = start; i < end; i += 4)
		DRM_INFO("%08x: %08x%s\n",
			 (int) (bias + i), mem[i / 4],
			 (bias + i == mark) ? " ********" : "");
	kunmap_atomic(mem, KM_USER0);
	/* give syslog time to catch up */
	msleep(1);
}

/*
 * i915_gem_dump_object - dump the first @len bytes of a GEM object.
 * @obj: object whose backing pages are dumped
 * @len: number of bytes to dump from the start of the object
 * @where: caller-supplied tag printed in the header line
 * @mark: GTT address to highlight within the dump
 *
 * Walks the object's backing pages and dumps them in 128-byte chunks,
 * pausing between chunks (in i915_gem_dump_page) so syslog keeps up.
 */
void
i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
		     const char *where, uint32_t mark)
{
	int page;

	DRM_INFO("%s: object at offset %08x\n", where, obj->gtt_offset);
	for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
		int page_len, chunk, chunk_len;

		/* Clamp the final page to the remaining length. */
		page_len = len - page * PAGE_SIZE;
		if (page_len > PAGE_SIZE)
			page_len = PAGE_SIZE;

		/* Dump in 128-byte chunks so each kmap_atomic section
		 * stays short. */
		for (chunk = 0; chunk < page_len; chunk += 128) {
			chunk_len = page_len - chunk;
			if (chunk_len > 128)
				chunk_len = 128;
			i915_gem_dump_page(obj->pages[page],
					   chunk, chunk + chunk_len,
					   obj->gtt_offset +
					   page * PAGE_SIZE,
					   mark);
		}
	}
}
#endif /* WATCH_EXEC || WATCH_PWRITE */
181 | ||
673a394b EA |
#if WATCH_COHERENCY
/*
 * i915_gem_object_check_coherency - compare CPU and GTT views of an object.
 * @obj: object to check
 * @handle: userspace handle, printed for identification only
 *
 * Maps the object's GTT range with ioremap and compares every 32-bit word
 * against the CPU view of the backing pages, logging up to ~8 mismatches.
 * Intended purely as a debugging aid; it sleeps and must not be called
 * from atomic context.
 *
 * Note: consistently uses obj->base.size (the embedded drm_gem_object
 * carries the size); the previous mix of obj->size and obj->base.size
 * was a leftover from the base-object embedding refactor.
 */
void
i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
{
	struct drm_device *dev = obj->base.dev;
	int page;
	uint32_t *gtt_mapping;
	uint32_t *backing_map = NULL;
	int bad_count = 0;

	DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
		 __func__, obj, obj->gtt_offset, handle,
		 obj->base.size / 1024);

	gtt_mapping = ioremap(dev->agp->base + obj->gtt_offset, obj->base.size);
	if (gtt_mapping == NULL) {
		DRM_ERROR("failed to map GTT space\n");
		return;
	}

	for (page = 0; page < obj->base.size / PAGE_SIZE; page++) {
		int i;

		backing_map = kmap_atomic(obj->pages[page], KM_USER0);

		if (backing_map == NULL) {
			DRM_ERROR("failed to map backing page\n");
			goto out;
		}

		/* Compare word by word: 1024 dwords per 4KiB page. */
		for (i = 0; i < PAGE_SIZE / 4; i++) {
			uint32_t cpuval = backing_map[i];
			uint32_t gttval = readl(gtt_mapping +
						page * 1024 + i);

			if (cpuval != gttval) {
				DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
					 "0x%08x vs 0x%08x\n",
					 (int)(obj->gtt_offset +
					       page * PAGE_SIZE + i * 4),
					 cpuval, gttval);
				/* Stop after a handful of mismatches to
				 * avoid flooding the log. */
				if (bad_count++ >= 8) {
					DRM_INFO("...\n");
					goto out;
				}
			}
		}
		kunmap_atomic(backing_map, KM_USER0);
		backing_map = NULL;
	}

 out:
	if (backing_map != NULL)
		kunmap_atomic(backing_map, KM_USER0);
	iounmap(gtt_mapping);

	/* give syslog time to catch up */
	msleep(1);

	/* Directly flush the object, since we just loaded values with the CPU
	 * from the backing pages and we don't want to disturb the cache
	 * management that we're trying to observe.
	 */

	i915_gem_clflush_object(obj);
}
#endif /* WATCH_COHERENCY */