/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
27 #include <core/client.h>
28 #include <core/option.h>
29 #include <core/notify.h>
30 #include <core/parent.h>
31 #include <subdev/bios.h>
32 #include <subdev/fb.h>
33 #include <subdev/instmem.h>
35 #include <nvif/class.h>
36 #include <nvif/unpack.h>
/* Serialises all access to the global device list below. */
static DEFINE_MUTEX(nv_devices_mutex);
/* Every registered nvkm_device, linked through device->head. */
static LIST_HEAD(nv_devices);
42 nvkm_device_find(u64 name
)
44 struct nvkm_device
*device
, *match
= NULL
;
45 mutex_lock(&nv_devices_mutex
);
46 list_for_each_entry(device
, &nv_devices
, head
) {
47 if (device
->handle
== name
) {
52 mutex_unlock(&nv_devices_mutex
);
57 nvkm_device_list(u64
*name
, int size
)
59 struct nvkm_device
*device
;
61 mutex_lock(&nv_devices_mutex
);
62 list_for_each_entry(device
, &nv_devices
, head
) {
64 name
[nr
- 1] = device
->handle
;
66 mutex_unlock(&nv_devices_mutex
);
/******************************************************************************
 * nvkm_devobj (0x0080): class implementation
 *****************************************************************************/
75 struct nvkm_parent base
;
76 struct nvkm_object
*subdev
[NVDEV_SUBDEV_NR
];
80 nvkm_devobj_info(struct nvkm_object
*object
, void *data
, u32 size
)
82 struct nvkm_device
*device
= nv_device(object
);
83 struct nvkm_fb
*fb
= nvkm_fb(device
);
84 struct nvkm_instmem
*imem
= nvkm_instmem(device
);
86 struct nv_device_info_v0 v0
;
90 nvif_ioctl(object
, "device info size %d\n", size
);
91 if (nvif_unpack(args
->v0
, 0, 0, false)) {
92 nvif_ioctl(object
, "device info vers %d\n", args
->v0
.version
);
96 switch (device
->chipset
) {
107 args
->v0
.platform
= NV_DEVICE_INFO_V0_IGP
;
111 if (pci_find_capability(device
->pdev
, PCI_CAP_ID_AGP
))
112 args
->v0
.platform
= NV_DEVICE_INFO_V0_AGP
;
114 if (pci_is_pcie(device
->pdev
))
115 args
->v0
.platform
= NV_DEVICE_INFO_V0_PCIE
;
117 args
->v0
.platform
= NV_DEVICE_INFO_V0_PCI
;
119 args
->v0
.platform
= NV_DEVICE_INFO_V0_SOC
;
124 switch (device
->card_type
) {
125 case NV_04
: args
->v0
.family
= NV_DEVICE_INFO_V0_TNT
; break;
127 case NV_11
: args
->v0
.family
= NV_DEVICE_INFO_V0_CELSIUS
; break;
128 case NV_20
: args
->v0
.family
= NV_DEVICE_INFO_V0_KELVIN
; break;
129 case NV_30
: args
->v0
.family
= NV_DEVICE_INFO_V0_RANKINE
; break;
130 case NV_40
: args
->v0
.family
= NV_DEVICE_INFO_V0_CURIE
; break;
131 case NV_50
: args
->v0
.family
= NV_DEVICE_INFO_V0_TESLA
; break;
132 case NV_C0
: args
->v0
.family
= NV_DEVICE_INFO_V0_FERMI
; break;
133 case NV_E0
: args
->v0
.family
= NV_DEVICE_INFO_V0_KEPLER
; break;
134 case GM100
: args
->v0
.family
= NV_DEVICE_INFO_V0_MAXWELL
; break;
140 args
->v0
.chipset
= device
->chipset
;
141 args
->v0
.revision
= device
->chiprev
;
143 args
->v0
.ram_size
= args
->v0
.ram_user
= fb
->ram
->size
;
145 args
->v0
.ram_size
= args
->v0
.ram_user
= 0;
146 if (imem
&& args
->v0
.ram_size
> 0)
147 args
->v0
.ram_user
= args
->v0
.ram_user
- imem
->reserved
;
153 nvkm_devobj_mthd(struct nvkm_object
*object
, u32 mthd
, void *data
, u32 size
)
156 case NV_DEVICE_V0_INFO
:
157 return nvkm_devobj_info(object
, data
, size
);
165 nvkm_devobj_rd08(struct nvkm_object
*object
, u64 addr
)
167 return nvkm_rd08(object
->engine
->subdev
.device
, addr
);
171 nvkm_devobj_rd16(struct nvkm_object
*object
, u64 addr
)
173 return nvkm_rd16(object
->engine
->subdev
.device
, addr
);
177 nvkm_devobj_rd32(struct nvkm_object
*object
, u64 addr
)
179 return nvkm_rd32(object
->engine
->subdev
.device
, addr
);
183 nvkm_devobj_wr08(struct nvkm_object
*object
, u64 addr
, u8 data
)
185 nvkm_wr08(object
->engine
->subdev
.device
, addr
, data
);
189 nvkm_devobj_wr16(struct nvkm_object
*object
, u64 addr
, u16 data
)
191 nvkm_wr16(object
->engine
->subdev
.device
, addr
, data
);
195 nvkm_devobj_wr32(struct nvkm_object
*object
, u64 addr
, u32 data
)
197 nvkm_wr32(object
->engine
->subdev
.device
, addr
, data
);
201 nvkm_devobj_map(struct nvkm_object
*object
, u64
*addr
, u32
*size
)
203 struct nvkm_device
*device
= nv_device(object
);
204 *addr
= nv_device_resource_start(device
, 0);
205 *size
= nv_device_resource_len(device
, 0);
209 static const u64 disable_map
[] = {
210 [NVDEV_SUBDEV_VBIOS
] = NV_DEVICE_V0_DISABLE_VBIOS
,
211 [NVDEV_SUBDEV_DEVINIT
] = NV_DEVICE_V0_DISABLE_CORE
,
212 [NVDEV_SUBDEV_GPIO
] = NV_DEVICE_V0_DISABLE_CORE
,
213 [NVDEV_SUBDEV_I2C
] = NV_DEVICE_V0_DISABLE_CORE
,
214 [NVDEV_SUBDEV_CLK
] = NV_DEVICE_V0_DISABLE_CORE
,
215 [NVDEV_SUBDEV_MXM
] = NV_DEVICE_V0_DISABLE_CORE
,
216 [NVDEV_SUBDEV_MC
] = NV_DEVICE_V0_DISABLE_CORE
,
217 [NVDEV_SUBDEV_BUS
] = NV_DEVICE_V0_DISABLE_CORE
,
218 [NVDEV_SUBDEV_TIMER
] = NV_DEVICE_V0_DISABLE_CORE
,
219 [NVDEV_SUBDEV_FB
] = NV_DEVICE_V0_DISABLE_CORE
,
220 [NVDEV_SUBDEV_LTC
] = NV_DEVICE_V0_DISABLE_CORE
,
221 [NVDEV_SUBDEV_IBUS
] = NV_DEVICE_V0_DISABLE_CORE
,
222 [NVDEV_SUBDEV_INSTMEM
] = NV_DEVICE_V0_DISABLE_CORE
,
223 [NVDEV_SUBDEV_MMU
] = NV_DEVICE_V0_DISABLE_CORE
,
224 [NVDEV_SUBDEV_BAR
] = NV_DEVICE_V0_DISABLE_CORE
,
225 [NVDEV_SUBDEV_VOLT
] = NV_DEVICE_V0_DISABLE_CORE
,
226 [NVDEV_SUBDEV_THERM
] = NV_DEVICE_V0_DISABLE_CORE
,
227 [NVDEV_SUBDEV_PMU
] = NV_DEVICE_V0_DISABLE_CORE
,
228 [NVDEV_SUBDEV_FUSE
] = NV_DEVICE_V0_DISABLE_CORE
,
229 [NVDEV_ENGINE_DMAOBJ
] = NV_DEVICE_V0_DISABLE_CORE
,
230 [NVDEV_ENGINE_PM
] = NV_DEVICE_V0_DISABLE_CORE
,
231 [NVDEV_ENGINE_FIFO
] = NV_DEVICE_V0_DISABLE_FIFO
,
232 [NVDEV_ENGINE_SW
] = NV_DEVICE_V0_DISABLE_FIFO
,
233 [NVDEV_ENGINE_GR
] = NV_DEVICE_V0_DISABLE_GR
,
234 [NVDEV_ENGINE_MPEG
] = NV_DEVICE_V0_DISABLE_MPEG
,
235 [NVDEV_ENGINE_ME
] = NV_DEVICE_V0_DISABLE_ME
,
236 [NVDEV_ENGINE_VP
] = NV_DEVICE_V0_DISABLE_VP
,
237 [NVDEV_ENGINE_CIPHER
] = NV_DEVICE_V0_DISABLE_CIPHER
,
238 [NVDEV_ENGINE_BSP
] = NV_DEVICE_V0_DISABLE_BSP
,
239 [NVDEV_ENGINE_MSPPP
] = NV_DEVICE_V0_DISABLE_MSPPP
,
240 [NVDEV_ENGINE_CE0
] = NV_DEVICE_V0_DISABLE_CE0
,
241 [NVDEV_ENGINE_CE1
] = NV_DEVICE_V0_DISABLE_CE1
,
242 [NVDEV_ENGINE_CE2
] = NV_DEVICE_V0_DISABLE_CE2
,
243 [NVDEV_ENGINE_VIC
] = NV_DEVICE_V0_DISABLE_VIC
,
244 [NVDEV_ENGINE_MSENC
] = NV_DEVICE_V0_DISABLE_MSENC
,
245 [NVDEV_ENGINE_DISP
] = NV_DEVICE_V0_DISABLE_DISP
,
246 [NVDEV_ENGINE_MSVLD
] = NV_DEVICE_V0_DISABLE_MSVLD
,
247 [NVDEV_ENGINE_SEC
] = NV_DEVICE_V0_DISABLE_SEC
,
248 [NVDEV_SUBDEV_NR
] = 0,
252 nvkm_devobj_dtor(struct nvkm_object
*object
)
254 struct nvkm_devobj
*devobj
= (void *)object
;
257 for (i
= NVDEV_SUBDEV_NR
- 1; i
>= 0; i
--)
258 nvkm_object_ref(NULL
, &devobj
->subdev
[i
]);
260 nvkm_parent_destroy(&devobj
->base
);
263 static struct nvkm_oclass
264 nvkm_devobj_oclass_super
= {
266 .ofuncs
= &(struct nvkm_ofuncs
) {
267 .dtor
= nvkm_devobj_dtor
,
268 .init
= _nvkm_parent_init
,
269 .fini
= _nvkm_parent_fini
,
270 .mthd
= nvkm_devobj_mthd
,
271 .map
= nvkm_devobj_map
,
272 .rd08
= nvkm_devobj_rd08
,
273 .rd16
= nvkm_devobj_rd16
,
274 .rd32
= nvkm_devobj_rd32
,
275 .wr08
= nvkm_devobj_wr08
,
276 .wr16
= nvkm_devobj_wr16
,
277 .wr32
= nvkm_devobj_wr32
,
282 nvkm_devobj_ctor(struct nvkm_object
*parent
, struct nvkm_object
*engine
,
283 struct nvkm_oclass
*oclass
, void *data
, u32 size
,
284 struct nvkm_object
**pobject
)
287 struct nv_device_v0 v0
;
289 struct nvkm_client
*client
= nv_client(parent
);
290 struct nvkm_device
*device
;
291 struct nvkm_devobj
*devobj
;
293 u64 disable
, mmio_base
, mmio_size
;
297 nvif_ioctl(parent
, "create device size %d\n", size
);
298 if (nvif_unpack(args
->v0
, 0, 0, false)) {
299 nvif_ioctl(parent
, "create device v%d device %016llx "
300 "disable %016llx debug0 %016llx\n",
301 args
->v0
.version
, args
->v0
.device
,
302 args
->v0
.disable
, args
->v0
.debug0
);
306 /* give priviledged clients register access */
308 oclass
= &nvkm_devobj_oclass_super
;
310 /* find the device subdev that matches what the client requested */
311 device
= client
->device
;
312 if (args
->v0
.device
!= ~0) {
313 device
= nvkm_device_find(args
->v0
.device
);
318 ret
= nvkm_parent_create(parent
, nv_object(device
), oclass
, 0,
320 (1ULL << NVDEV_ENGINE_DMAOBJ
) |
321 (1ULL << NVDEV_ENGINE_FIFO
) |
322 (1ULL << NVDEV_ENGINE_DISP
) |
323 (1ULL << NVDEV_ENGINE_PM
), &devobj
);
324 *pobject
= nv_object(devobj
);
328 mmio_base
= nv_device_resource_start(device
, 0);
329 mmio_size
= nv_device_resource_len(device
, 0);
331 /* translate api disable mask into internal mapping */
332 disable
= args
->v0
.debug0
;
333 for (i
= 0; i
< NVDEV_SUBDEV_NR
; i
++) {
334 if (args
->v0
.disable
& disable_map
[i
])
335 disable
|= (1ULL << i
);
338 /* identify the chipset, and determine classes of subdev/engines */
339 if (!(args
->v0
.disable
& NV_DEVICE_V0_DISABLE_IDENTIFY
) &&
340 !device
->card_type
) {
341 map
= ioremap(mmio_base
, 0x102000);
345 /* switch mmio to cpu's native endianness */
347 if (ioread32_native(map
+ 0x000004) != 0x00000000) {
349 if (ioread32_native(map
+ 0x000004) == 0x00000000) {
351 iowrite32_native(0x01000001, map
+ 0x000004);
352 ioread32_native(map
);
355 /* read boot0 and strapping information */
356 boot0
= ioread32_native(map
+ 0x000000);
357 strap
= ioread32_native(map
+ 0x101000);
360 /* determine chipset and derive architecture from it */
361 if ((boot0
& 0x1f000000) > 0) {
362 device
->chipset
= (boot0
& 0x1ff00000) >> 20;
363 device
->chiprev
= (boot0
& 0x000000ff);
364 switch (device
->chipset
& 0x1f0) {
366 if (0x461 & (1 << (device
->chipset
& 0xf)))
367 device
->card_type
= NV_10
;
369 device
->card_type
= NV_11
;
370 device
->chiprev
= 0x00;
373 case 0x020: device
->card_type
= NV_20
; break;
374 case 0x030: device
->card_type
= NV_30
; break;
376 case 0x060: device
->card_type
= NV_40
; break;
380 case 0x0a0: device
->card_type
= NV_50
; break;
382 case 0x0d0: device
->card_type
= NV_C0
; break;
385 case 0x100: device
->card_type
= NV_E0
; break;
387 case 0x120: device
->card_type
= GM100
; break;
392 if ((boot0
& 0xff00fff0) == 0x20004000) {
393 if (boot0
& 0x00f00000)
394 device
->chipset
= 0x05;
396 device
->chipset
= 0x04;
397 device
->card_type
= NV_04
;
400 switch (device
->card_type
) {
401 case NV_04
: ret
= nv04_identify(device
); break;
403 case NV_11
: ret
= nv10_identify(device
); break;
404 case NV_20
: ret
= nv20_identify(device
); break;
405 case NV_30
: ret
= nv30_identify(device
); break;
406 case NV_40
: ret
= nv40_identify(device
); break;
407 case NV_50
: ret
= nv50_identify(device
); break;
408 case NV_C0
: ret
= gf100_identify(device
); break;
409 case NV_E0
: ret
= gk104_identify(device
); break;
410 case GM100
: ret
= gm100_identify(device
); break;
417 nvdev_error(device
, "unknown chipset (%08x)\n", boot0
);
421 nvdev_info(device
, "NVIDIA %s (%08x)\n", device
->cname
, boot0
);
423 /* determine frequency of timing crystal */
424 if ( device
->card_type
<= NV_10
|| device
->chipset
< 0x17 ||
425 (device
->chipset
>= 0x20 && device
->chipset
< 0x25))
431 case 0x00000000: device
->crystal
= 13500; break;
432 case 0x00000040: device
->crystal
= 14318; break;
433 case 0x00400000: device
->crystal
= 27000; break;
434 case 0x00400040: device
->crystal
= 25000; break;
437 if ( (args
->v0
.disable
& NV_DEVICE_V0_DISABLE_IDENTIFY
)) {
438 device
->cname
= "NULL";
439 device
->oclass
[NVDEV_SUBDEV_VBIOS
] = &nvkm_bios_oclass
;
442 if (!(args
->v0
.disable
& NV_DEVICE_V0_DISABLE_MMIO
) && !device
->pri
) {
443 device
->pri
= ioremap(mmio_base
, mmio_size
);
445 nvdev_error(device
, "unable to map PRI\n");
450 /* ensure requested subsystems are available for use */
451 for (i
= 1, c
= 1; i
< NVDEV_SUBDEV_NR
; i
++) {
452 if (!(oclass
= device
->oclass
[i
]) || (disable
& (1ULL << i
)))
455 if (device
->subdev
[i
]) {
456 nvkm_object_ref(device
->subdev
[i
], &devobj
->subdev
[i
]);
460 #define _(s,m) case s: \
461 ret = nvkm_object_ctor(nv_object(device), NULL, oclass, NULL, \
462 (s), (struct nvkm_object **)&device->m);\
463 if (ret == -ENODEV) \
467 devobj->subdev[s] = (struct nvkm_object *)device->m; \
468 device->subdev[s] = devobj->subdev[s]; \
472 _(NVDEV_SUBDEV_BAR
, bar
);
473 _(NVDEV_SUBDEV_VBIOS
, bios
);
474 _(NVDEV_SUBDEV_BUS
, bus
);
475 _(NVDEV_SUBDEV_CLK
, clk
);
476 _(NVDEV_SUBDEV_DEVINIT
, devinit
);
477 _(NVDEV_SUBDEV_FB
, fb
);
478 _(NVDEV_SUBDEV_FUSE
, fuse
);
479 _(NVDEV_SUBDEV_GPIO
, gpio
);
480 _(NVDEV_SUBDEV_I2C
, i2c
);
481 _(NVDEV_SUBDEV_IBUS
, ibus
);
482 _(NVDEV_SUBDEV_INSTMEM
, imem
);
483 _(NVDEV_SUBDEV_LTC
, ltc
);
484 _(NVDEV_SUBDEV_MC
, mc
);
485 _(NVDEV_SUBDEV_MMU
, mmu
);
486 _(NVDEV_SUBDEV_MXM
, mxm
);
487 _(NVDEV_SUBDEV_PMU
, pmu
);
488 _(NVDEV_SUBDEV_THERM
, therm
);
489 _(NVDEV_SUBDEV_TIMER
, timer
);
490 _(NVDEV_SUBDEV_VOLT
, volt
);
491 _(NVDEV_ENGINE_BSP
, bsp
);
492 _(NVDEV_ENGINE_CE0
, ce
[0]);
493 _(NVDEV_ENGINE_CE1
, ce
[1]);
494 _(NVDEV_ENGINE_CE2
, ce
[2]);
495 _(NVDEV_ENGINE_CIPHER
, cipher
);
496 _(NVDEV_ENGINE_DISP
, disp
);
497 _(NVDEV_ENGINE_DMAOBJ
, dma
);
498 _(NVDEV_ENGINE_FIFO
, fifo
);
499 _(NVDEV_ENGINE_GR
, gr
);
500 _(NVDEV_ENGINE_IFB
, ifb
);
501 _(NVDEV_ENGINE_ME
, me
);
502 _(NVDEV_ENGINE_MPEG
, mpeg
);
503 _(NVDEV_ENGINE_MSENC
, msenc
);
504 _(NVDEV_ENGINE_MSPDEC
, mspdec
);
505 _(NVDEV_ENGINE_MSPPP
, msppp
);
506 _(NVDEV_ENGINE_MSVLD
, msvld
);
507 _(NVDEV_ENGINE_PM
, pm
);
508 _(NVDEV_ENGINE_SEC
, sec
);
509 _(NVDEV_ENGINE_SW
, sw
);
510 _(NVDEV_ENGINE_VIC
, vic
);
511 _(NVDEV_ENGINE_VP
, vp
);
518 /* note: can't init *any* subdevs until devinit has been run
519 * due to not knowing exactly what the vbios init tables will
520 * mess with. devinit also can't be run until all of its
521 * dependencies have been created.
523 * this code delays init of any subdev until all of devinit's
524 * dependencies have been created, and then initialises each
525 * subdev in turn as they're created.
527 while (i
>= NVDEV_SUBDEV_DEVINIT_LAST
&& c
<= i
) {
528 struct nvkm_object
*subdev
= devobj
->subdev
[c
++];
529 if (subdev
&& !nv_iclass(subdev
, NV_ENGINE_CLASS
)) {
530 ret
= nvkm_object_inc(subdev
);
533 atomic_dec(&nv_object(device
)->usecount
);
536 nvkm_subdev_reset(subdev
);
544 static struct nvkm_ofuncs
545 nvkm_devobj_ofuncs
= {
546 .ctor
= nvkm_devobj_ctor
,
547 .dtor
= nvkm_devobj_dtor
,
548 .init
= _nvkm_parent_init
,
549 .fini
= _nvkm_parent_fini
,
550 .mthd
= nvkm_devobj_mthd
,
/******************************************************************************
 * nvkm_device: engine functions
 *****************************************************************************/
560 struct nvkm_object
*device
= nv_object(obj
);
561 if (device
->engine
== NULL
) {
562 while (device
&& device
->parent
)
563 device
= device
->parent
;
565 device
= &nv_object(obj
)->engine
->subdev
.object
;
566 if (device
&& device
->parent
)
567 device
= device
->parent
;
569 #if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
572 return (void *)device
;
575 static struct nvkm_oclass
576 nvkm_device_sclass
[] = {
577 { 0x0080, &nvkm_devobj_ofuncs
},
582 nvkm_device_event_ctor(struct nvkm_object
*object
, void *data
, u32 size
,
583 struct nvkm_notify
*notify
)
585 if (!WARN_ON(size
!= 0)) {
594 static const struct nvkm_event_func
595 nvkm_device_event_func
= {
596 .ctor
= nvkm_device_event_ctor
,
600 nvkm_device_fini(struct nvkm_object
*object
, bool suspend
)
602 struct nvkm_device
*device
= (void *)object
;
603 struct nvkm_object
*subdev
;
606 for (i
= NVDEV_SUBDEV_NR
- 1; i
>= 0; i
--) {
607 if ((subdev
= device
->subdev
[i
])) {
608 if (!nv_iclass(subdev
, NV_ENGINE_CLASS
)) {
609 ret
= nvkm_object_dec(subdev
, suspend
);
616 ret
= nvkm_acpi_fini(device
, suspend
);
618 for (; ret
&& i
< NVDEV_SUBDEV_NR
; i
++) {
619 if ((subdev
= device
->subdev
[i
])) {
620 if (!nv_iclass(subdev
, NV_ENGINE_CLASS
)) {
621 ret
= nvkm_object_inc(subdev
);
633 nvkm_device_init(struct nvkm_object
*object
)
635 struct nvkm_device
*device
= (void *)object
;
636 struct nvkm_object
*subdev
;
639 ret
= nvkm_acpi_init(device
);
643 for (i
= 0; i
< NVDEV_SUBDEV_NR
; i
++) {
644 if ((subdev
= device
->subdev
[i
])) {
645 if (!nv_iclass(subdev
, NV_ENGINE_CLASS
)) {
646 ret
= nvkm_object_inc(subdev
);
650 nvkm_subdev_reset(subdev
);
657 for (--i
; ret
&& i
>= 0; i
--) {
658 if ((subdev
= device
->subdev
[i
])) {
659 if (!nv_iclass(subdev
, NV_ENGINE_CLASS
))
660 nvkm_object_dec(subdev
, false);
665 nvkm_acpi_fini(device
, false);
670 nv_device_resource_start(struct nvkm_device
*device
, unsigned int bar
)
672 if (nv_device_is_pci(device
)) {
673 return pci_resource_start(device
->pdev
, bar
);
675 struct resource
*res
;
676 res
= platform_get_resource(device
->platformdev
,
677 IORESOURCE_MEM
, bar
);
685 nv_device_resource_len(struct nvkm_device
*device
, unsigned int bar
)
687 if (nv_device_is_pci(device
)) {
688 return pci_resource_len(device
->pdev
, bar
);
690 struct resource
*res
;
691 res
= platform_get_resource(device
->platformdev
,
692 IORESOURCE_MEM
, bar
);
695 return resource_size(res
);
700 nv_device_get_irq(struct nvkm_device
*device
, bool stall
)
702 if (nv_device_is_pci(device
)) {
703 return device
->pdev
->irq
;
705 return platform_get_irq_byname(device
->platformdev
,
706 stall
? "stall" : "nonstall");
710 static struct nvkm_oclass
711 nvkm_device_oclass
= {
712 .handle
= NV_ENGINE(DEVICE
, 0x00),
713 .ofuncs
= &(struct nvkm_ofuncs
) {
714 .init
= nvkm_device_init
,
715 .fini
= nvkm_device_fini
,
720 nvkm_device_del(struct nvkm_device
**pdevice
)
722 struct nvkm_device
*device
= *pdevice
;
724 nvkm_event_fini(&device
->event
);
726 mutex_lock(&nv_devices_mutex
);
727 list_del(&device
->head
);
728 mutex_unlock(&nv_devices_mutex
);
731 iounmap(device
->pri
);
733 nvkm_engine_destroy(&device
->engine
);
739 nvkm_device_new(void *dev
, enum nv_bus_type type
, u64 name
,
740 const char *sname
, const char *cfg
, const char *dbg
,
741 struct nvkm_device
**pdevice
)
743 struct nvkm_device
*device
;
746 mutex_lock(&nv_devices_mutex
);
747 list_for_each_entry(device
, &nv_devices
, head
) {
748 if (device
->handle
== name
)
752 ret
= nvkm_engine_create(NULL
, NULL
, &nvkm_device_oclass
, true,
753 "DEVICE", "device", &device
);
761 device
->dev
= &device
->pdev
->dev
;
763 case NVKM_BUS_PLATFORM
:
764 device
->platformdev
= dev
;
765 device
->dev
= &device
->platformdev
->dev
;
768 device
->handle
= name
;
769 device
->cfgopt
= cfg
;
770 device
->dbgopt
= dbg
;
771 device
->name
= sname
;
773 nv_subdev(device
)->debug
= nvkm_dbgopt(device
->dbgopt
, "DEVICE");
774 nv_engine(device
)->sclass
= nvkm_device_sclass
;
775 list_add_tail(&device
->head
, &nv_devices
);
777 ret
= nvkm_event_init(&nvkm_device_event_func
, 1, 1, &device
->event
);
779 mutex_unlock(&nv_devices_mutex
);