drivers/gpu/drm/i915/i915_sysfs.c
/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
#include "intel_drv.h"
#include "i915_drv.h"

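/* The sysfs struct device here is the DRM minor's kdev; the DRM core
 * stores the drm_minor in the device's drvdata, which this macro fetches.
 */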
#define dev_to_drm_minor(d) dev_get_drvdata((d))

#ifdef CONFIG_PM
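/*
 * Convert a raw RC6 residency counter into milliseconds.
 *
 * On most platforms the counter ticks in 1.28us units, so
 * ms = ticks * 128 / 100000. VLV/CHV tick in CZ clock units instead
 * (scaled up by 256 when the high-range bit is set), and Broxton in
 * 833.33ns units, hence the per-platform units/div pairs below.
 */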
static u32 calc_residency(struct drm_device *dev,
			  i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u64 raw_time; /* 32b value may overflow during fixed point math */
	u64 units = 128ULL, div = 100000ULL;
	u32 ret;

	if (!intel_enable_rc6())
		return 0;

	intel_runtime_pm_get(dev_priv);

	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		units = 1;
		div = dev_priv->czclk_freq;

		if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
			units <<= 8;
	} else if (IS_BROXTON(dev)) {
		units = 1;
		div = 1200; /* 833.33ns */
	}

	raw_time = I915_READ(reg) * units;
	ret = DIV_ROUND_UP_ULL(raw_time, div);

	intel_runtime_pm_put(dev_priv);
	return ret;
}

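/*
 * The RC6 attributes are merged into the device's standard "power"
 * group, so on a typical single-GPU system (path assumed) they appear
 * as, e.g.:
 *
 *   $ cat /sys/class/drm/card0/power/rc6_residency_ms
 */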
static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6());
}

static ssize_t
show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

static ssize_t
show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
}

static ssize_t
show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}

static ssize_t
show_media_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	u32 rc6_residency = calc_residency(dminor->dev, VLV_GT_MEDIA_RC6);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
static DEVICE_ATTR(media_rc6_residency_ms, S_IRUGO, show_media_rc6_ms, NULL);

static struct attribute *rc6_attrs[] = {
	&dev_attr_rc6_enable.attr,
	&dev_attr_rc6_residency_ms.attr,
	NULL
};

static struct attribute_group rc6_attr_group = {
	.name = power_group_name,
	.attrs = rc6_attrs
};

static struct attribute *rc6p_attrs[] = {
	&dev_attr_rc6p_residency_ms.attr,
	&dev_attr_rc6pp_residency_ms.attr,
	NULL
};

static struct attribute_group rc6p_attr_group = {
	.name = power_group_name,
	.attrs = rc6p_attrs
};

static struct attribute *media_rc6_attrs[] = {
	&dev_attr_media_rc6_residency_ms.attr,
	NULL
};

static struct attribute_group media_rc6_attr_group = {
	.name = power_group_name,
	.attrs = media_rc6_attrs
};
#endif

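/*
 * Validate userspace access to the per-slice L3 parity remap tables:
 * reject unsupported hardware (-EPERM), unaligned offsets (-EINVAL)
 * and offsets past the end of the log (-ENXIO).
 */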
static int l3_access_valid(struct drm_device *dev, loff_t offset)
{
	if (!HAS_L3_DPF(dev))
		return -EPERM;

	if (offset % 4 != 0)
		return -EINVAL;

	if (offset >= GEN7_L3LOG_SIZE)
		return -ENXIO;

	return 0;
}

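/*
 * Read back the current remap table for the slice identified by
 * attr->private; a slice that has never been remapped reads as zeroes.
 */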
static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
	     struct bin_attribute *attr, char *buf,
	     loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_minor *dminor = dev_to_drm_minor(dev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = to_i915(drm_dev);
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	count = round_down(count, 4);

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	if (dev_priv->l3_parity.remap_info[slice])
		memcpy(buf,
		       dev_priv->l3_parity.remap_info[slice] + (offset/4),
		       count);
	else
		memset(buf, 0, count);

	mutex_unlock(&drm_dev->struct_mutex);

	return count;
}

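/*
 * Update the remap table for one slice. The table is allocated lazily
 * on first write; the new remapping is not applied immediately but is
 * deferred until each context is next scheduled, via ctx->remap_slice.
 */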
static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
	      struct bin_attribute *attr, char *buf,
	      loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_minor *dminor = dev_to_drm_minor(dev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = to_i915(drm_dev);
	struct i915_gem_context *ctx;
	u32 *temp = NULL; /* Just here to make handling failures easy */
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	if (!HAS_HW_CONTEXTS(drm_dev))
		return -ENXIO;

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	if (!dev_priv->l3_parity.remap_info[slice]) {
		temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
		if (!temp) {
			mutex_unlock(&drm_dev->struct_mutex);
			return -ENOMEM;
		}
	}

	/* TODO: Ideally we really want a GPU reset here to make sure errors
	 * aren't propagated. Since I cannot find a stable way to reset the
	 * GPU at this point, it is left as a TODO.
	 */
	if (temp)
		dev_priv->l3_parity.remap_info[slice] = temp;

	memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);

	/* NB: We defer the remapping until we switch to the context */
	list_for_each_entry(ctx, &dev_priv->context_list, link)
		ctx->remap_slice |= (1<<slice);

	mutex_unlock(&drm_dev->struct_mutex);

	return count;
}

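/*
 * One binary attribute per L3 slice; .private carries the slice index
 * that i915_l3_read()/i915_l3_write() decode back out of the attribute.
 */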
static struct bin_attribute dpf_attrs = {
	.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)0
};

static struct bin_attribute dpf_attrs_1 = {
	.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)1
};

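/*
 * gt_act_freq_mhz reports the frequency the hardware is actually
 * running at (read from the punit on VLV/CHV, from GEN6_RPSTAT1
 * elsewhere), whereas gt_cur_freq_mhz below reports the frequency last
 * requested by software; the two can differ, for instance when the GPU
 * idles below the requested frequency.
 */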
static ssize_t gt_act_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 freq;
		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
	} else {
		u32 rpstat = I915_READ(GEN6_RPSTAT1);
		if (IS_GEN9(dev_priv))
			ret = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
		else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
			ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		ret = intel_gpu_freq(dev_priv, ret);
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq);
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

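/* RPe, the "efficient" frequency, only exists on VLV/CHV. */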
static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
				     struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	return snprintf(buf, PAGE_SIZE,
			"%d\n",
			intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
}

static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

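/*
 * Set a new RPS upper soft limit. The MHz value from userspace is
 * converted to a hardware ratio with intel_freq_opcode(), validated
 * against the hardware limits and the current minimum soft limit, and
 * the current frequency is then re-clamped into the new window, e.g.
 * (path assumed for a typical single-GPU setup):
 *
 *   $ echo 600 > /sys/class/drm/card0/gt_max_freq_mhz
 */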
static ssize_t gt_max_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);

	val = intel_freq_opcode(dev_priv, val);

	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
	    val < dev_priv->rps.min_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		intel_runtime_pm_put(dev_priv);
		return -EINVAL;
	}

	if (val > dev_priv->rps.rp0_freq)
		DRM_DEBUG("User requested overclocking to %d\n",
			  intel_gpu_freq(dev_priv, val));

	dev_priv->rps.max_freq_softlimit = val;

	val = clamp_t(int, dev_priv->rps.cur_freq,
		      dev_priv->rps.min_freq_softlimit,
		      dev_priv->rps.max_freq_softlimit);

	/* We still need *_set_rps to process the new max_delay and
	 * update the interrupt limits and PMINTRMSK even though the
	 * frequency request may be unchanged. */
	intel_set_rps(dev_priv, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	return count;
}

static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

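/*
 * Set a new RPS lower soft limit; the mirror image of
 * gt_max_freq_mhz_store() above, validating against the current
 * maximum soft limit instead.
 */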
static ssize_t gt_min_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);

	val = intel_freq_opcode(dev_priv, val);

	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
	    val > dev_priv->rps.max_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		intel_runtime_pm_put(dev_priv);
		return -EINVAL;
	}

	dev_priv->rps.min_freq_softlimit = val;

	val = clamp_t(int, dev_priv->rps.cur_freq,
		      dev_priv->rps.min_freq_softlimit,
		      dev_priv->rps.max_freq_softlimit);

	/* We still need *_set_rps to process the new min_delay and
	 * update the interrupt limits and PMINTRMSK even though the
	 * frequency request may be unchanged. */
	intel_set_rps(dev_priv, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	return count;
}

static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);

static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);

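/*
 * gt_rp_mhz_show() distinguishes RP0/RP1/RPn by comparing the attribute
 * pointer against the DEVICE_ATTRs below, so it must be declared before
 * they are defined.
 */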
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);

/* For now we have a static number of RP states */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val;

	if (attr == &dev_attr_gt_RP0_freq_mhz)
		val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
	else if (attr == &dev_attr_gt_RP1_freq_mhz)
		val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
	else if (attr == &dev_attr_gt_RPn_freq_mhz)
		val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
	else
		BUG();

	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}

static const struct attribute *gen6_attrs[] = {
	&dev_attr_gt_act_freq_mhz.attr,
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	NULL,
};

static const struct attribute *vlv_attrs[] = {
	&dev_attr_gt_act_freq_mhz.attr,
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	&dev_attr_vlv_rpe_freq_mhz.attr,
	NULL,
};

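/*
 * The "error" binary attribute exposes the last captured GPU error
 * state: reading returns the textual dump, writing anything resets it,
 * e.g. (path assumed for a typical single-GPU setup):
 *
 *   $ cat /sys/class/drm/card0/error > gpu-hang.txt
 *   $ echo 1 > /sys/class/drm/card0/error
 */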
static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr, char *buf,
				loff_t off, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct i915_error_state_file_priv error_priv;
	struct drm_i915_error_state_buf error_str;
	ssize_t ret_count = 0;
	int ret;

	memset(&error_priv, 0, sizeof(error_priv));

	ret = i915_error_state_buf_init(&error_str, to_i915(dev), count, off);
	if (ret)
		return ret;

	error_priv.dev = dev;
	i915_error_state_get(dev, &error_priv);

	ret = i915_error_state_to_str(&error_str, &error_priv);
	if (ret)
		goto out;

	ret_count = count < error_str.bytes ? count : error_str.bytes;

	memcpy(buf, error_str.buf, ret_count);
out:
	i915_error_state_put(&error_priv);
	i915_error_state_buf_release(&error_str);

	return ret ?: ret_count;
}

static ssize_t error_state_write(struct file *file, struct kobject *kobj,
				 struct bin_attribute *attr, char *buf,
				 loff_t off, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return count;
}

static struct bin_attribute error_state_attr = {
	.attr.name = "error",
	.attr.mode = S_IRUSR | S_IWUSR,
	.size = 0,
	.read = error_state_read,
	.write = error_state_write,
};

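/*
 * Register every attribute that applies to this device: the RC6 groups
 * (merged into the existing "power" group), the L3 parity files, the
 * RPS frequency controls and the error-state dump. Teardown below
 * unconditionally removes them; removing a sysfs file that was never
 * created is simply a no-op.
 */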
void i915_setup_sysfs(struct drm_device *dev)
{
	int ret;

#ifdef CONFIG_PM
	if (HAS_RC6(dev)) {
		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
					&rc6_attr_group);
		if (ret)
			DRM_ERROR("RC6 residency sysfs setup failed\n");
	}
	if (HAS_RC6p(dev)) {
		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
					&rc6p_attr_group);
		if (ret)
			DRM_ERROR("RC6p residency sysfs setup failed\n");
	}
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
					&media_rc6_attr_group);
		if (ret)
			DRM_ERROR("Media RC6 residency sysfs setup failed\n");
	}
#endif
	if (HAS_L3_DPF(dev)) {
		ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
		if (ret)
			DRM_ERROR("l3 parity sysfs setup failed\n");

		if (NUM_L3_SLICES(dev) > 1) {
			ret = device_create_bin_file(dev->primary->kdev,
						     &dpf_attrs_1);
			if (ret)
				DRM_ERROR("l3 parity slice 1 setup failed\n");
		}
	}

	ret = 0;
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs);
	else if (INTEL_INFO(dev)->gen >= 6)
		ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs);
	if (ret)
		DRM_ERROR("RPS sysfs setup failed\n");

	ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
				    &error_state_attr);
	if (ret)
		DRM_ERROR("error_state sysfs setup failed\n");
}

void i915_teardown_sysfs(struct drm_device *dev)
{
	sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr);
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs);
	else
		sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs);
	device_remove_bin_file(dev->primary->kdev, &dpf_attrs_1);
	device_remove_bin_file(dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6p_attr_group);
#endif
}