drm/amdgpu: implement cgs gpu memory callbacks
drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "cgs_linux.h"
#include "atom.h"

struct amdgpu_cgs_device {
	struct cgs_device base;
	struct amdgpu_device *adev;
};

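/*
 * Every CGS callback receives an opaque cgs_device pointer; this helper
 * macro recovers the amdgpu_device that backs it.
 */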
#define CGS_FUNC_ADEV \
	struct amdgpu_device *adev = \
		((struct amdgpu_cgs_device *)cgs_device)->adev

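/*
 * Report the MC address range and remaining capacity of each CGS memory
 * pool: CPU-visible VRAM, CPU-invisible VRAM, and GART.
 */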
static int amdgpu_cgs_gpu_mem_info(void *cgs_device, enum cgs_gpu_mem_type type,
				   uint64_t *mc_start, uint64_t *mc_size,
				   uint64_t *mem_size)
{
	CGS_FUNC_ADEV;
	switch (type) {
	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
		*mc_start = 0;
		*mc_size = adev->mc.visible_vram_size;
		*mem_size = adev->mc.visible_vram_size - adev->vram_pin_size;
		break;
	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
		*mc_start = adev->mc.visible_vram_size;
		*mc_size = adev->mc.real_vram_size - adev->mc.visible_vram_size;
		*mem_size = *mc_size;
		break;
	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
		*mc_start = adev->mc.gtt_start;
		*mc_size = adev->mc.gtt_size;
		*mem_size = adev->mc.gtt_size - adev->gart_pin_size;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

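/*
 * Map a kernel (vmalloc'ed) buffer into the GART so the GPU can reach it.
 * The buffer is wrapped in a GTT buffer object and pinned inside the
 * [min_offset, max_offset] window; *mcaddr receives the GPU address.
 */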
static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem,
				uint64_t size,
				uint64_t min_offset, uint64_t max_offset,
				cgs_handle_t *kmem_handle, uint64_t *mcaddr)
{
	CGS_FUNC_ADEV;
	int ret, i;
	struct amdgpu_bo *bo;
	struct page **pages;
	struct sg_table *sg;
	int npages = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;

	/* gather one struct page per vmalloc page; taking only the first
	 * page would corrupt the sg list whenever size > PAGE_SIZE */
	pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;
	for (i = 0; i < npages; i++)
		pages[i] = vmalloc_to_page((char *)kmem + ((uint64_t)i << PAGE_SHIFT));

	sg = drm_prime_pages_to_sg(pages, npages);
	kfree(pages);
	if (IS_ERR(sg))
		return PTR_ERR(sg);

	ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false,
			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, &bo);
	if (ret)
		return ret;
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	/* pin buffer into GTT */
	ret = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_GTT,
				       min_offset, max_offset, mcaddr);
	amdgpu_bo_unreserve(bo);

	*kmem_handle = (cgs_handle_t)bo;
	return ret;
}

static int amdgpu_cgs_gunmap_kmem(void *cgs_device, cgs_handle_t kmem_handle)
{
	struct amdgpu_bo *obj = (struct amdgpu_bo *)kmem_handle;

	if (obj) {
		int r = amdgpu_bo_reserve(obj, false);
		if (likely(r == 0)) {
			amdgpu_bo_unpin(obj);
			amdgpu_bo_unreserve(obj);
		}
		amdgpu_bo_unref(&obj);
	}
	return 0;
}

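/*
 * Allocate a buffer object in one of the CGS pools, optionally restricted
 * to the [min_offset, max_offset) range of that pool. The object is not
 * GPU-mapped until amdgpu_cgs_gmap_gpu_mem() pins it.
 */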
static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
				    enum cgs_gpu_mem_type type,
				    uint64_t size, uint64_t align,
				    uint64_t min_offset, uint64_t max_offset,
				    cgs_handle_t *handle)
{
	CGS_FUNC_ADEV;
	uint64_t flags = 0;
	int ret = 0;
	uint32_t domain = 0;
	struct amdgpu_bo *obj;
	struct ttm_placement placement;
	struct ttm_place place;

	if (WARN_ON(min_offset > max_offset))
		return -EINVAL;

	/* fail if the alignment is not a power of 2 */
	if (((align != 1) && (align & (align - 1)))
	    || size == 0 || align == 0)
		return -EINVAL;

	switch (type) {
	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (max_offset > adev->mc.real_vram_size)
			return -EINVAL;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;
		break;
	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
			place.fpfn =
				max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT;
			place.lpfn =
				min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT;
		} else {
			/* all of VRAM is CPU-visible; fall back to the
			 * caller's range so 'place' is never left
			 * uninitialized */
			place.fpfn = min_offset >> PAGE_SHIFT;
			place.lpfn = max_offset >> PAGE_SHIFT;
		}
		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;
		break;
	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
		domain = AMDGPU_GEM_DOMAIN_GTT;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
		break;
	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
		flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
		domain = AMDGPU_GEM_DOMAIN_GTT;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
			TTM_PL_FLAG_UNCACHED;
		break;
	default:
		return -EINVAL;
	}

	*handle = 0;

	placement.placement = &place;
	placement.num_placement = 1;
	placement.busy_placement = &place;
	placement.num_busy_placement = 1;

	ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
					  true, domain, flags,
					  NULL, &placement, &obj);
	if (ret) {
		DRM_ERROR("(%d) bo create failed\n", ret);
		return ret;
	}
	*handle = (cgs_handle_t)obj;

	return ret;
}

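/*
 * Turn a dma-buf fd into an amdgpu buffer object handle. The importing
 * DRM file is looked up via the current task's pid, since CGS clients do
 * not carry a drm_file of their own.
 */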
static int amdgpu_cgs_import_gpu_mem(void *cgs_device, int dmabuf_fd,
				     cgs_handle_t *handle)
{
	CGS_FUNC_ADEV;
	int r;
	uint32_t dma_handle;
	struct drm_gem_object *obj;
	struct amdgpu_bo *bo;
	struct drm_device *dev = adev->ddev;
	struct drm_file *file_priv = NULL, *priv;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(priv, &dev->filelist, lhead) {
		rcu_read_lock();
		/* compare without get_pid(): a reference taken here would
		 * never be dropped, the pid is only compared, not stored */
		if (priv->pid == task_pid(current))
			file_priv = priv;
		rcu_read_unlock();
		if (file_priv)
			break;
	}
	mutex_unlock(&dev->struct_mutex);

	if (!file_priv)
		return -EINVAL;

	r = dev->driver->prime_fd_to_handle(dev,
					    file_priv, dmabuf_fd,
					    &dma_handle);
	if (r)
		return r;

	spin_lock(&file_priv->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&file_priv->object_idr, dma_handle);
	if (obj == NULL) {
		spin_unlock(&file_priv->table_lock);
		return -EINVAL;
	}
	spin_unlock(&file_priv->table_lock);
	bo = gem_to_amdgpu_bo(obj);
	*handle = (cgs_handle_t)bo;
	return 0;
}

static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle)
{
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	if (obj) {
		int r = amdgpu_bo_reserve(obj, false);
		if (likely(r == 0)) {
			amdgpu_bo_kunmap(obj);
			amdgpu_bo_unpin(obj);
			amdgpu_bo_unreserve(obj);
		}
		amdgpu_bo_unref(&obj);
	}
	return 0;
}

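/*
 * Pin a previously allocated buffer object at its restricted placement
 * and return the resulting GPU (MC) address in *mcaddr.
 */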
static int amdgpu_cgs_gmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
				   uint64_t *mcaddr)
{
	int r;
	u64 min_offset, max_offset;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	WARN_ON_ONCE(obj->placement.num_placement > 1);

	/* widen before shifting: fpfn/lpfn are 32-bit page frame numbers,
	 * so an unwidened shift would overflow above 4GB */
	min_offset = (u64)obj->placements[0].fpfn << PAGE_SHIFT;
	max_offset = (u64)obj->placements[0].lpfn << PAGE_SHIFT;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin_restricted(obj, AMDGPU_GEM_DOMAIN_GTT,
				     min_offset, max_offset, mcaddr);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_gunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_unpin(obj);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_kmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
				   void **map)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_kmap(obj, map);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_kunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	amdgpu_bo_kunmap(obj);
	amdgpu_bo_unreserve(obj);
	return r;
}

static uint32_t amdgpu_cgs_read_register(void *cgs_device, unsigned offset)
{
	CGS_FUNC_ADEV;
	return RREG32(offset);
}

static void amdgpu_cgs_write_register(void *cgs_device, unsigned offset,
				      uint32_t value)
{
	CGS_FUNC_ADEV;
	WREG32(offset, value);
}

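/*
 * Indirect register spaces are reached through index/data pairs; each
 * case below maps a CGS space onto the matching amdgpu accessor.
 */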
static uint32_t amdgpu_cgs_read_ind_register(void *cgs_device,
					     enum cgs_ind_reg space,
					     unsigned index)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__MMIO:
		return RREG32_IDX(index);
	case CGS_IND_REG__PCIE:
		return RREG32_PCIE(index);
	case CGS_IND_REG__SMC:
		return RREG32_SMC(index);
	case CGS_IND_REG__UVD_CTX:
		return RREG32_UVD_CTX(index);
	case CGS_IND_REG__DIDT:
		return RREG32_DIDT(index);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return 0;
	}
	WARN(1, "Invalid indirect register space");
	return 0;
}

static void amdgpu_cgs_write_ind_register(void *cgs_device,
					  enum cgs_ind_reg space,
					  unsigned index, uint32_t value)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__MMIO:
		return WREG32_IDX(index, value);
	case CGS_IND_REG__PCIE:
		return WREG32_PCIE(index, value);
	case CGS_IND_REG__SMC:
		return WREG32_SMC(index, value);
	case CGS_IND_REG__UVD_CTX:
		return WREG32_UVD_CTX(index, value);
	case CGS_IND_REG__DIDT:
		return WREG32_DIDT(index, value);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return;
	}
	WARN(1, "Invalid indirect register space");
}

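/*
 * PCI config space helpers. On a read failure the WARN fires and zero is
 * returned, since the CGS interface has no way to report the error.
 */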
static uint8_t amdgpu_cgs_read_pci_config_byte(void *cgs_device, unsigned addr)
{
	CGS_FUNC_ADEV;
	uint8_t val;
	int ret = pci_read_config_byte(adev->pdev, addr, &val);

	if (WARN(ret, "pci_read_config_byte error"))
		return 0;
	return val;
}

static uint16_t amdgpu_cgs_read_pci_config_word(void *cgs_device, unsigned addr)
{
	CGS_FUNC_ADEV;
	uint16_t val;
	int ret = pci_read_config_word(adev->pdev, addr, &val);

	if (WARN(ret, "pci_read_config_word error"))
		return 0;
	return val;
}

static uint32_t amdgpu_cgs_read_pci_config_dword(void *cgs_device,
						 unsigned addr)
{
	CGS_FUNC_ADEV;
	uint32_t val;
	int ret = pci_read_config_dword(adev->pdev, addr, &val);

	if (WARN(ret, "pci_read_config_dword error"))
		return 0;
	return val;
}

static void amdgpu_cgs_write_pci_config_byte(void *cgs_device, unsigned addr,
					     uint8_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_byte(adev->pdev, addr, value);

	WARN(ret, "pci_write_config_byte error");
}

static void amdgpu_cgs_write_pci_config_word(void *cgs_device, unsigned addr,
					     uint16_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_word(adev->pdev, addr, value);

	WARN(ret, "pci_write_config_word error");
}

static void amdgpu_cgs_write_pci_config_dword(void *cgs_device, unsigned addr,
					      uint32_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_dword(adev->pdev, addr, value);

	WARN(ret, "pci_write_config_dword error");
}

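/*
 * ATOM BIOS access: expose data tables and command table execution to
 * CGS clients through the device's shared atom_context.
 */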
static const void *amdgpu_cgs_atom_get_data_table(void *cgs_device,
						  unsigned table, uint16_t *size,
						  uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;
	uint16_t data_start;

	if (amdgpu_atom_parse_data_header(
		    adev->mode_info.atom_context, table, size,
		    frev, crev, &data_start))
		return (uint8_t *)adev->mode_info.atom_context->bios +
			data_start;

	return NULL;
}

static int amdgpu_cgs_atom_get_cmd_table_revs(void *cgs_device, unsigned table,
					      uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;

	if (amdgpu_atom_parse_cmd_header(
		    adev->mode_info.atom_context, table,
		    frev, crev))
		return 0;

	return -EINVAL;
}

static int amdgpu_cgs_atom_exec_cmd_table(void *cgs_device, unsigned table,
					  void *args)
{
	CGS_FUNC_ADEV;

	return amdgpu_atom_execute_table(
		adev->mode_info.atom_context, table, args);
}

static int amdgpu_cgs_create_pm_request(void *cgs_device, cgs_handle_t *request)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_destroy_pm_request(void *cgs_device, cgs_handle_t request)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_set_pm_request(void *cgs_device, cgs_handle_t request,
				     int active)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_request_clock(void *cgs_device, cgs_handle_t request,
				       enum cgs_clock clock, unsigned freq)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_request_engine(void *cgs_device, cgs_handle_t request,
					enum cgs_engine engine, int powered)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_query_clock_limits(void *cgs_device,
					    enum cgs_clock clock,
					    struct cgs_clock_limits *limits)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_set_camera_voltages(void *cgs_device, uint32_t mask,
					  const uint32_t *voltages)
{
	DRM_ERROR("not implemented\n");
	return -EPERM;
}

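/*
 * IRQ glue: each CGS interrupt source is wrapped in an amdgpu_irq_src
 * whose callbacks forward into the client-supplied set/handler functions
 * stored in cgs_irq_params.
 */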
struct cgs_irq_params {
	unsigned src_id;
	cgs_irq_source_set_func_t set;
	cgs_irq_handler_func_t handler;
	void *private_data;
};

static int cgs_set_irq_state(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *src,
			     unsigned type,
			     enum amdgpu_interrupt_state state)
{
	struct cgs_irq_params *irq_params =
		(struct cgs_irq_params *)src->data;

	if (!irq_params)
		return -EINVAL;
	if (!irq_params->set)
		return -EINVAL;
	return irq_params->set(irq_params->private_data,
			       irq_params->src_id,
			       type,
			       (int)state);
}

static int cgs_process_irq(struct amdgpu_device *adev,
			   struct amdgpu_irq_src *source,
			   struct amdgpu_iv_entry *entry)
{
	struct cgs_irq_params *irq_params =
		(struct cgs_irq_params *)source->data;

	if (!irq_params)
		return -EINVAL;
	if (!irq_params->handler)
		return -EINVAL;
	return irq_params->handler(irq_params->private_data,
				   irq_params->src_id,
				   entry->iv_entry);
}

static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
	.set = cgs_set_irq_state,
	.process = cgs_process_irq,
};

static int amdgpu_cgs_add_irq_source(void *cgs_device, unsigned src_id,
				     unsigned num_types,
				     cgs_irq_source_set_func_t set,
				     cgs_irq_handler_func_t handler,
				     void *private_data)
{
	CGS_FUNC_ADEV;
	int ret = 0;
	struct cgs_irq_params *irq_params;
	struct amdgpu_irq_src *source =
		kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);

	if (!source)
		return -ENOMEM;
	irq_params =
		kzalloc(sizeof(struct cgs_irq_params), GFP_KERNEL);
	if (!irq_params) {
		kfree(source);
		return -ENOMEM;
	}
	source->num_types = num_types;
	source->funcs = &cgs_irq_funcs;
	irq_params->src_id = src_id;
	irq_params->set = set;
	irq_params->handler = handler;
	irq_params->private_data = private_data;
	source->data = (void *)irq_params;
	ret = amdgpu_irq_add_id(adev, src_id, source);
	if (ret) {
		kfree(irq_params);
		kfree(source);
	}

	return ret;
}

static int amdgpu_cgs_irq_get(void *cgs_device, unsigned src_id, unsigned type)
{
	CGS_FUNC_ADEV;
	return amdgpu_irq_get(adev, adev->irq.sources[src_id], type);
}

static int amdgpu_cgs_irq_put(void *cgs_device, unsigned src_id, unsigned type)
{
	CGS_FUNC_ADEV;
	return amdgpu_irq_put(adev, adev->irq.sources[src_id], type);
}

/* positional initializers: entries must stay in cgs_ops declaration order */
static const struct cgs_ops amdgpu_cgs_ops = {
	amdgpu_cgs_gpu_mem_info,
	amdgpu_cgs_gmap_kmem,
	amdgpu_cgs_gunmap_kmem,
	amdgpu_cgs_alloc_gpu_mem,
	amdgpu_cgs_free_gpu_mem,
	amdgpu_cgs_gmap_gpu_mem,
	amdgpu_cgs_gunmap_gpu_mem,
	amdgpu_cgs_kmap_gpu_mem,
	amdgpu_cgs_kunmap_gpu_mem,
	amdgpu_cgs_read_register,
	amdgpu_cgs_write_register,
	amdgpu_cgs_read_ind_register,
	amdgpu_cgs_write_ind_register,
	amdgpu_cgs_read_pci_config_byte,
	amdgpu_cgs_read_pci_config_word,
	amdgpu_cgs_read_pci_config_dword,
	amdgpu_cgs_write_pci_config_byte,
	amdgpu_cgs_write_pci_config_word,
	amdgpu_cgs_write_pci_config_dword,
	amdgpu_cgs_atom_get_data_table,
	amdgpu_cgs_atom_get_cmd_table_revs,
	amdgpu_cgs_atom_exec_cmd_table,
	amdgpu_cgs_create_pm_request,
	amdgpu_cgs_destroy_pm_request,
	amdgpu_cgs_set_pm_request,
	amdgpu_cgs_pm_request_clock,
	amdgpu_cgs_pm_request_engine,
	amdgpu_cgs_pm_query_clock_limits,
	amdgpu_cgs_set_camera_voltages
};

static const struct cgs_os_ops amdgpu_cgs_os_ops = {
	amdgpu_cgs_import_gpu_mem,
	amdgpu_cgs_add_irq_source,
	amdgpu_cgs_irq_get,
	amdgpu_cgs_irq_put
};

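/*
 * A minimal usage sketch (assuming an initialized amdgpu_device and the
 * cgs_* wrapper macros from cgs_common.h; everything outside this file
 * is illustrative, not a fixed API):
 *
 *	void *cgs_dev = amdgpu_cgs_create_device(adev);
 *	if (cgs_dev) {
 *		uint64_t mc_start, mc_size, mem_size;
 *		cgs_gpu_mem_info(cgs_dev, CGS_GPU_MEM_TYPE__VISIBLE_FB,
 *				 &mc_start, &mc_size, &mem_size);
 *		...
 *		amdgpu_cgs_destroy_device(cgs_dev);
 *	}
 */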
void *amdgpu_cgs_create_device(struct amdgpu_device *adev)
{
	struct amdgpu_cgs_device *cgs_device =
		kmalloc(sizeof(*cgs_device), GFP_KERNEL);

	if (!cgs_device) {
		DRM_ERROR("Couldn't allocate CGS device structure\n");
		return NULL;
	}

	cgs_device->base.ops = &amdgpu_cgs_ops;
	cgs_device->base.os_ops = &amdgpu_cgs_os_ops;
	cgs_device->adev = adev;

	return cgs_device;
}

void amdgpu_cgs_destroy_device(void *cgs_device)
{
	kfree(cgs_device);
}